self.method = data.getVar('SSTATE_HASHEQUIV_METHOD')
if not self.method:
bb.fatal("OEEquivHash requires SSTATE_HASHEQUIV_METHOD to be set")
- self.unihashes = bb.persist_data.persist('SSTATESIG_UNIHASH_CACHE_v1_' + self.method.replace('.', '_'), data)
def get_taskdata(self):
return (self.server, self.method) + super().get_taskdata()
# If a unique hash is reported, use it as the stampfile hash. This
# ensures that a task won't be re-run if the taskhash changes but it
# would still result in the same output hash
- unihash = self.unihashes.get(self.__get_task_unihash_key(task))
+ unihash = self.unitaskhashes.get(self.__get_task_unihash_key(task), None)
if unihash is not None:
return unihash
return super().get_stampfile_hash(task)
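+ # Record a unihash for a task in the shared cache so that later
+ # get_unihash()/get_stampfile_hash() calls return it directly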
+ def set_unihash(self, task, unihash):
+ self.unitaskhashes[self.__get_task_unihash_key(task)] = unihash
+
def get_unihash(self, task):
import urllib
import json
# TODO: This cache can grow unbounded. It probably only needs to keep
# the most recent unihash for each task
- unihash = self.unihashes.get(key)
+ unihash = self.unitaskhashes.get(key, None)
if unihash is not None:
return unihash
except (KeyError, json.JSONDecodeError) as e:
bb.warn('Poorly formatted response from %s: %s' % (self.server, str(e)))
- self.unihashes[key] = unihash
+ self.unitaskhashes[key] = unihash
return unihash
def report_unihash(self, path, task, d):
key = fn + '.do_' + task + ':' + taskhash
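# This key must match the format used by __get_task_unihash_key() when the
# unihash was first cached, otherwise the sanity check below will not find it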
# Sanity checks
- cache_unihash = self.unihashes.get(key)
+ cache_unihash = self.unitaskhashes.get(key, None)
if cache_unihash is None:
bb.fatal('%s not in unihash cache. Please report this error' % key)
if new_unihash != unihash:
bb.debug(1, 'Task %s unihash changed %s -> %s by server %s' % (taskhash, unihash, new_unihash, self.server))
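+ # Notify the runqueue that the unihash for this task has changed so it
+ # can update its cached value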
+ bb.event.fire(bb.runqueue.taskUniHashUpdate(fn + ':do_' + task, new_unihash), d)
else:
bb.debug(1, 'Reported task %s as unihash %s to %s' % (taskhash, unihash, self.server))
except urllib.error.URLError as e: