bitbake: Update to bitbake 1.8 branch head
author    Richard Purdie <richard@openedhand.com>    Mon, 3 Mar 2008 22:01:45 +0000 (22:01 +0000)
committer Richard Purdie <richard@openedhand.com>    Mon, 3 Mar 2008 22:01:45 +0000 (22:01 +0000)
git-svn-id: https://svn.o-hand.com/repos/poky/trunk@3892 311d38ba-8fff-0310-9ca6-ca027cbcb966

13 files changed:
bitbake/ChangeLog
bitbake/lib/bb/__init__.py
bitbake/lib/bb/build.py
bitbake/lib/bb/cache.py
bitbake/lib/bb/cooker.py
bitbake/lib/bb/data_smart.py
bitbake/lib/bb/event.py
bitbake/lib/bb/fetch/__init__.py
bitbake/lib/bb/parse/parse_py/BBHandler.py
bitbake/lib/bb/runqueue.py
bitbake/lib/bb/shell.py
bitbake/lib/bb/taskdata.py
bitbake/lib/bb/utils.py

diff --git a/bitbake/ChangeLog b/bitbake/ChangeLog
index 7e6b7b26a45e584883c0ca8268120c642be78356..d074a5e239108e54d09316d4d7342319be2b062c 100644
@@ -1,6 +1,26 @@
 Changes in BitBake 1.8.x:
        - Fix exit code for build failures in --continue mode
        - Fix git branch tags fetching
+       - Change parseConfigurationFile so it works on real data, not a copy
+       - Handle 'base' inherit and all other INHERITs from parseConfigurationFile 
+         instead of BBHandler
+       - Fix getVarFlags bug in data_smart
+       - Optimise cache handling by more quickly detecting an invalid cache, only
+         saving the cache when it's changed, moving the cache validity check into
+         the parsing loop and factoring some getVar calls outside a for loop
+       - Cooker: Remove a debug message from the parsing loop to lower overhead
+       - Convert build.py exec_task to use getVarFlags
+       - Update shell to use cooker.buildFile
+       - Add StampUpdate event
+       - Convert -b option to use taskdata/runqueue
+       - Remove digraph and switch to new stamp checking code. exec_task no longer
+         honours dependencies
+       - Make fetcher timestamp updating non-fatal when permissions don't allow 
+         updates
+       - Add BB_SCHEDULER variable/option ("completion" or "speed") controlling
+         the way bitbake schedules tasks
+       - Add BB_STAMP_POLICY variable/option ("perfile" or "full") controlling
+         how extensively stamps are looked at for validity
 
 Changes in BitBake 1.8.10:
        - Psyco is available only for x86 - do not use it on other architectures.
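The last two entries above add BB_SCHEDULER and BB_STAMP_POLICY as plain configuration variables; the run queue reads them with the defaults shown in the runqueue.py hunk further down. A minimal sketch of that lookup (cfgData here stands for the cooker's configuration data store):

    import bb.data

    # "speed" (default) or "completion"; controls the order tasks are scheduled in
    scheduler = bb.data.getVar("BB_SCHEDULER", cfgData, 1) or "speed"
    # "perfile" (default) or "full"; controls how widely stamps are checked for validity
    stamppolicy = bb.data.getVar("BB_STAMP_POLICY", cfgData, 1) or "perfile"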
diff --git a/bitbake/lib/bb/__init__.py b/bitbake/lib/bb/__init__.py
index a126c17693938a1661d6660315dce8e7bd251cf1..c452d529c1e2382394ff4ec4121138b64d8e5d17 100644
@@ -46,7 +46,6 @@ __all__ = [
     "pkgcmp",
     "dep_parenreduce",
     "dep_opconvert",
-    "digraph",
 
 # fetch
     "decodeurl",
@@ -1128,184 +1127,6 @@ def dep_opconvert(mysplit, myuse):
             mypos += 1
     return newsplit
 
-class digraph:
-    """beautiful directed graph object"""
-
-    def __init__(self):
-        self.dict={}
-        #okeys = keys, in order they were added (to optimize firstzero() ordering)
-        self.okeys=[]
-        self.__callback_cache=[]
-
-    def __str__(self):
-        str = ""
-        for key in self.okeys:
-            str += "%s:\t%s\n" % (key, self.dict[key][1])
-        return str
-
-    def addnode(self,mykey,myparent):
-        if not mykey in self.dict:
-            self.okeys.append(mykey)
-            if myparent==None:
-                self.dict[mykey]=[0,[]]
-            else:
-                self.dict[mykey]=[0,[myparent]]
-                self.dict[myparent][0]=self.dict[myparent][0]+1
-            return
-        if myparent and (not myparent in self.dict[mykey][1]):
-            self.dict[mykey][1].append(myparent)
-            self.dict[myparent][0]=self.dict[myparent][0]+1
-
-    def delnode(self,mykey, ref = 1):
-        """Delete a node
-
-        If ref is 1, remove references to this node from other nodes.
-        If ref is 2, remove nodes that reference this node."""
-        if not mykey in self.dict:
-            return
-        for x in self.dict[mykey][1]:
-            self.dict[x][0]=self.dict[x][0]-1
-        del self.dict[mykey]
-        while 1:
-            try:
-                self.okeys.remove(mykey)
-            except ValueError:
-                break
-        if ref:
-            __kill = []
-            for k in self.okeys:
-                if mykey in self.dict[k][1]:
-                    if ref == 1 or ref == 2:
-                        self.dict[k][1].remove(mykey)
-                    if ref == 2:
-                        __kill.append(k)
-            for l in __kill:
-                self.delnode(l, ref)
-
-    def allnodes(self):
-        "returns all nodes in the dictionary"
-        keys = self.dict.keys()
-        ret = []
-        for key in keys:
-            ret.append(key)
-        ret.sort()
-        return ret
-
-    def firstzero(self):
-        "returns first node with zero references, or NULL if no such node exists"
-        for x in self.okeys:
-            if self.dict[x][0]==0:
-                return x
-        return None
-
-    def firstnonzero(self):
-        "returns first node with nonzero references, or NULL if no such node exists"
-        for x in self.okeys:
-            if self.dict[x][0]!=0:
-                return x
-        return None
-
-
-    def allzeros(self):
-        "returns all nodes with zero references, or NULL if no such node exists"
-        zerolist = []
-        for x in self.dict.keys():
-            if self.dict[x][0]==0:
-                zerolist.append(x)
-        return zerolist
-
-    def hasallzeros(self):
-        "returns 0/1, Are all nodes zeros? 1 : 0"
-        zerolist = []
-        for x in self.dict.keys():
-            if self.dict[x][0]!=0:
-                return 0
-        return 1
-
-    def empty(self):
-        if len(self.dict)==0:
-            return 1
-        return 0
-
-    def hasnode(self,mynode):
-        return mynode in self.dict
-
-    def getparents(self, item):
-        if not self.hasnode(item):
-            return []
-        parents = self.dict[item][1]
-        ret = []
-        for parent in parents:
-            ret.append(parent)
-        ret.sort()
-        return ret
-
-    def getchildren(self, item):
-        if not self.hasnode(item):
-            return []
-        children = [i for i in self.okeys if item in self.getparents(i)]
-        return children
-
-    def walkdown(self, item, callback, debug = None, usecache = False):
-        if not self.hasnode(item):
-            return 0
-
-        if usecache:
-            if self.__callback_cache.count(item):
-                if debug:
-                    print "hit cache for item: %s" % item
-                return 1
-
-        parents = self.getparents(item)
-        children = self.getchildren(item)
-        for p in parents:
-            if p in children:
-#                print "%s is both parent and child of %s" % (p, item)
-                if usecache:
-                    self.__callback_cache.append(p)
-                ret = callback(self, p)
-                if ret == 0:
-                    return 0
-                continue
-            if item == p:
-                print "eek, i'm my own parent!"
-                return 0
-            if debug:
-                print "item: %s, p: %s" % (item, p)
-            ret = self.walkdown(p, callback, debug, usecache)
-            if ret == 0:
-                return 0
-        if usecache:
-            self.__callback_cache.append(item)
-        return callback(self, item)
-
-    def walkup(self, item, callback):
-        if not self.hasnode(item):
-            return 0
-
-        parents = self.getparents(item)
-        children = self.getchildren(item)
-        for c in children:
-            if c in parents:
-                ret = callback(self, item)
-                if ret == 0:
-                    return 0
-                continue
-            if item == c:
-                print "eek, i'm my own child!"
-                return 0
-            ret = self.walkup(c, callback)
-            if ret == 0:
-                return 0
-        return callback(self, item)
-
-    def copy(self):
-        mygraph=digraph()
-        for x in self.dict.keys():
-            mygraph.dict[x]=self.dict[x][:]
-            mygraph.okeys=self.okeys[:]
-        return mygraph
-
 if __name__ == "__main__":
     import doctest, bb
     doctest.testmod(bb)
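The digraph class deleted above was BitBake's generic dependency-graph container: build.py used addnode() to record task edges and walkdown() to run a callback over a task's ancestors before the task itself. The hunks that follow drop it in favour of a flat per-file task_deps dictionary plus the run queue's own ordering, which is why exec_task no longer honours dependencies by itself. For reference, a minimal sketch of the old usage (based on the removed code; not part of the new API):

    g = digraph()
    g.addnode("do_configure", None)
    g.addnode("do_compile", "do_configure")    # do_compile depends on do_configure

    def execute(graph, item):
        print "would run %s" % item
        return 1

    g.walkdown("do_compile", execute)          # visits do_configure, then do_compile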
diff --git a/bitbake/lib/bb/build.py b/bitbake/lib/bb/build.py
index 1c015fe9a3f38679c5f4c3ede9d0e3beb7359c66..25c03a0a4edbf46f564f677c3f90aeb8d26dfbed 100644
@@ -74,12 +74,21 @@ def exec_func(func, d, dirs = None):
     if not body:
         return
 
-    cleandirs = (data.expand(data.getVarFlag(func, 'cleandirs', d), d) or "").split()
+    flags = data.getVarFlags(func, d)
+    for item in ['deps', 'check', 'interactive', 'python', 'cleandirs', 'dirs', 'lockfiles', 'fakeroot']:
+        if not item in flags:
+            flags[item] = None
+
+    ispython = flags['python']
+
+    cleandirs = (data.expand(flags['cleandirs'], d) or "").split()
     for cdir in cleandirs:
         os.system("rm -rf %s" % cdir)
 
-    if not dirs:
-        dirs = (data.expand(data.getVarFlag(func, 'dirs', d), d) or "").split()
+    if dirs:
+        dirs = data.expand(dirs, d)
+    else:
+        dirs = (data.expand(flags['dirs'], d) or "").split()
     for adir in dirs:
         mkdirhier(adir)
 
@@ -88,24 +97,22 @@ def exec_func(func, d, dirs = None):
     else:
         adir = data.getVar('B', d, 1)
 
-    adir = data.expand(adir, d)
-
     try:
         prevdir = os.getcwd()
     except OSError:
-        prevdir = data.expand('${TOPDIR}', d)
+        prevdir = data.getVar('TOPDIR', d, True)
     if adir and os.access(adir, os.F_OK):
         os.chdir(adir)
 
     locks = []
-    lockfiles = (data.expand(data.getVarFlag(func, 'lockfiles', d), d) or "").split()
+    lockfiles = (data.expand(flags['lockfiles'], d) or "").split()
     for lock in lockfiles:
         locks.append(bb.utils.lockfile(lock))
 
-    if data.getVarFlag(func, "python", d):
+    if flags['python']:
         exec_func_python(func, d)
     else:
-        exec_func_shell(func, d)
+        exec_func_shell(func, d, flags)
 
     for lock in locks:
         bb.utils.unlockfile(lock)
@@ -117,19 +124,20 @@ def exec_func_python(func, d):
     """Execute a python BB 'function'"""
     import re, os
 
+    bbfile = bb.data.getVar('FILE', d, 1)
     tmp  = "def " + func + "():\n%s" % data.getVar(func, d)
     tmp += '\n' + func + '()'
-    comp = utils.better_compile(tmp, func, bb.data.getVar('FILE', d, 1) )
+    comp = utils.better_compile(tmp, func, bbfile)
     prevdir = os.getcwd()
     g = {} # globals
     g['bb'] = bb
     g['os'] = os
     g['d'] = d
-    utils.better_exec(comp,g,tmp, bb.data.getVar('FILE',d,1))
+    utils.better_exec(comp, g, tmp, bbfile)
     if os.path.exists(prevdir):
         os.chdir(prevdir)
 
-def exec_func_shell(func, d):
+def exec_func_shell(func, d, flags):
     """Execute a shell BB 'function' Returns true if execution was successful.
 
     For this, it creates a bash shell script in the tmp dectory, writes the local
@@ -141,9 +149,9 @@ def exec_func_shell(func, d):
     """
     import sys
 
-    deps = data.getVarFlag(func, 'deps', d)
-    check = data.getVarFlag(func, 'check', d)
-    interact = data.getVarFlag(func, 'interactive', d)
+    deps = flags['deps']
+    check = flags['check']
+    interact = flags['interactive']
     if check in globals():
         if globals()[check](func, deps):
             return
@@ -195,7 +203,7 @@ def exec_func_shell(func, d):
 
     # execute function
     prevdir = os.getcwd()
-    if data.getVarFlag(func, "fakeroot", d):
+    if flags['fakeroot']:
         maybe_fakeroot = "PATH=\"%s\" fakeroot " % bb.data.getVar("PATH", d, 1)
     else:
         maybe_fakeroot = ''
@@ -255,72 +263,29 @@ def exec_task(task, d):
        a function is that a task exists in the task digraph, and therefore
        has dependencies amongst other tasks."""
 
-    # check if the task is in the graph..
-    task_graph = data.getVar('_task_graph', d)
-    if not task_graph:
-        task_graph = bb.digraph()
-        data.setVar('_task_graph', task_graph, d)
-    task_cache = data.getVar('_task_cache', d)
-    if not task_cache:
-        task_cache = []
-        data.setVar('_task_cache', task_cache, d)
-    if not task_graph.hasnode(task):
-        raise EventException("Missing node in task graph", InvalidTask(task, d))
-
-    # check whether this task needs executing..
-    if stamp_is_current(task, d):
-        return 1
-
-    # follow digraph path up, then execute our way back down
-    def execute(graph, item):
-        if data.getVarFlag(item, 'task', d):
-            if item in task_cache:
-                return 1
-
-            if task != item:
-                # deeper than toplevel, exec w/ deps
-                exec_task(item, d)
-                return 1
-
-            try:
-                bb.msg.debug(1, bb.msg.domain.Build, "Executing task %s" % item)
-                old_overrides = data.getVar('OVERRIDES', d, 0)
-                localdata = data.createCopy(d)
-                data.setVar('OVERRIDES', 'task_%s:%s' % (item, old_overrides), localdata)
-                data.update_data(localdata)
-                event.fire(TaskStarted(item, localdata))
-                exec_func(item, localdata)
-                event.fire(TaskSucceeded(item, localdata))
-                task_cache.append(item)
-                data.setVar('_task_cache', task_cache, d)
-            except FuncFailed, reason:
-                bb.msg.note(1, bb.msg.domain.Build, "Task failed: %s" % reason )
-                failedevent = TaskFailed(item, d)
-                event.fire(failedevent)
-                raise EventException("Function failed in task: %s" % reason, failedevent)
-
-    if data.getVarFlag(task, 'dontrundeps', d):
-        execute(None, task)
-    else:
-        task_graph.walkdown(task, execute)
+    # Check whether this is a valid task
+    if not data.getVarFlag(task, 'task', d):
+        raise EventException("No such task", InvalidTask(task, d))
+
+    try:
+        bb.msg.debug(1, bb.msg.domain.Build, "Executing task %s" % task)
+        old_overrides = data.getVar('OVERRIDES', d, 0)
+        localdata = data.createCopy(d)
+        data.setVar('OVERRIDES', 'task_%s:%s' % (task, old_overrides), localdata)
+        data.update_data(localdata)
+        event.fire(TaskStarted(task, localdata))
+        exec_func(task, localdata)
+        event.fire(TaskSucceeded(task, localdata))
+    except FuncFailed, reason:
+        bb.msg.note(1, bb.msg.domain.Build, "Task failed: %s" % reason )
+        failedevent = TaskFailed(task, d)
+        event.fire(failedevent)
+        raise EventException("Function failed in task: %s" % reason, failedevent)
 
     # make stamp, or cause event and raise exception
     if not data.getVarFlag(task, 'nostamp', d) and not data.getVarFlag(task, 'selfstamp', d):
         make_stamp(task, d)
 
-def extract_stamp_data(d, fn):
-    """
-    Extracts stamp data from d which is either a data dictonary (fn unset) 
-    or a dataCache entry (fn set). 
-    """
-    if fn:
-        return (d.task_queues[fn], d.stamp[fn], d.task_deps[fn])
-    task_graph = data.getVar('_task_graph', d)
-    if not task_graph:
-        task_graph = bb.digraph()
-        data.setVar('_task_graph', task_graph, d)
-    return (task_graph, data.getVar('STAMP', d, 1), None)
-
 def extract_stamp(d, fn):
     """
     Extracts stamp format which is either a data dictonary (fn unset) 
@@ -330,49 +295,6 @@ def extract_stamp(d, fn):
         return d.stamp[fn]
     return data.getVar('STAMP', d, 1)
 
-def stamp_is_current(task, d, file_name = None, checkdeps = 1):
-    """
-    Check status of a given task's stamp. 
-    Returns 0 if it is not current and needs updating.
-    (d can be a data dict or dataCache)
-    """
-
-    (task_graph, stampfn, taskdep) = extract_stamp_data(d, file_name)
-
-    if not stampfn:
-        return 0
-
-    stampfile = "%s.%s" % (stampfn, task)
-    if not os.access(stampfile, os.F_OK):
-        return 0
-
-    if checkdeps == 0:
-        return 1
-
-    import stat
-    tasktime = os.stat(stampfile)[stat.ST_MTIME]
-
-    _deps = []
-    def checkStamp(graph, task):
-        # check for existance
-        if file_name:
-            if 'nostamp' in taskdep and task in taskdep['nostamp']:
-                return 1
-        else:
-            if data.getVarFlag(task, 'nostamp', d):
-                return 1
-
-        if not stamp_is_current(task, d, file_name, 0                                          ):
-            return 0
-
-        depfile = "%s.%s" % (stampfn, task)
-        deptime = os.stat(depfile)[stat.ST_MTIME]
-        if deptime > tasktime:
-            return 0
-        return 1
-
-    return task_graph.walkdown(task, checkStamp)
-
 def stamp_internal(task, d, file_name):
     """
     Internal stamp helper function
@@ -409,40 +331,39 @@ def del_stamp(task, d, file_name = None):
     stamp_internal(task, d, file_name)
 
 def add_tasks(tasklist, d):
-    task_graph = data.getVar('_task_graph', d)
     task_deps = data.getVar('_task_deps', d)
-    if not task_graph:
-        task_graph = bb.digraph()
     if not task_deps:
         task_deps = {}
+    if not 'tasks' in task_deps:
+        task_deps['tasks'] = []
+    if not 'parents' in task_deps:
+        task_deps['parents'] = {}
 
     for task in tasklist:
-        deps = tasklist[task]
         task = data.expand(task, d)
-
         data.setVarFlag(task, 'task', 1, d)
-        task_graph.addnode(task, None)
-        for dep in deps:
-            dep = data.expand(dep, d)
-            if not task_graph.hasnode(dep):
-                task_graph.addnode(dep, None)
-            task_graph.addnode(task, dep)
+
+        if not task in task_deps['tasks']:
+            task_deps['tasks'].append(task)
 
         flags = data.getVarFlags(task, d)    
         def getTask(name):
+            if not name in task_deps:
+                task_deps[name] = {}
             if name in flags:
                 deptask = data.expand(flags[name], d)
-                if not name in task_deps:
-                    task_deps[name] = {}
                 task_deps[name][task] = deptask
         getTask('depends')
         getTask('deptask')
         getTask('rdeptask')
         getTask('recrdeptask')
         getTask('nostamp')
+        task_deps['parents'][task] = []
+        for dep in flags['deps']:
+            dep = data.expand(dep, d)
+            task_deps['parents'][task].append(dep)
 
     # don't assume holding a reference
-    data.setVar('_task_graph', task_graph, d)
     data.setVar('_task_deps', task_deps, d)
 
 def remove_task(task, kill, d):
@@ -450,22 +371,5 @@ def remove_task(task, kill, d):
 
        If kill is 1, also remove tasks that depend on this task."""
 
-    task_graph = data.getVar('_task_graph', d)
-    if not task_graph:
-        task_graph = bb.digraph()
-    if not task_graph.hasnode(task):
-        return
-
     data.delVarFlag(task, 'task', d)
-    ref = 1
-    if kill == 1:
-        ref = 2
-    task_graph.delnode(task, ref)
-    data.setVar('_task_graph', task_graph, d)
-
-def task_exists(task, d):
-    task_graph = data.getVar('_task_graph', d)
-    if not task_graph:
-        task_graph = bb.digraph()
-        data.setVar('_task_graph', task_graph, d)
-    return task_graph.hasnode(task)
+
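With the digraph gone, add_tasks above records everything in the flat _task_deps dictionary: 'tasks' lists the task names, 'parents' holds each task's dependency list taken from its 'deps' flag, and flag names such as 'deptask' or 'nostamp' map tasks to their expanded flag values. For a recipe where do_compile runs after do_configure, the structure looks roughly like this (illustrative values, not a real parse):

    task_deps = {
        'tasks':   ['do_configure', 'do_compile'],
        'parents': {
            'do_configure': [],
            'do_compile':   ['do_configure'],
        },
        # per-flag dictionaries filled in by getTask(); empty when unused
        'depends': {}, 'deptask': {}, 'rdeptask': {}, 'recrdeptask': {}, 'nostamp': {},
    }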
diff --git a/bitbake/lib/bb/cache.py b/bitbake/lib/bb/cache.py
index 7d7e66ebd29faecc44c5c0c7e340e85c533cf917..dad82a9b36b8b48b65e427e7122465273181c2e2 100644
@@ -39,7 +39,7 @@ except ImportError:
     import pickle
     bb.msg.note(1, bb.msg.domain.Cache, "Importing cPickle failed. Falling back to a very slow implementation.")
 
-__cache_version__ = "127"
+__cache_version__ = "128"
 
 class Cache:
     """
@@ -50,9 +50,11 @@ class Cache:
 
         self.cachedir = bb.data.getVar("CACHE", cooker.configuration.data, True)
         self.clean = {}
+        self.checked = {}
         self.depends_cache = {}
         self.data = None
         self.data_fn = None
+        self.cacheclean = True
 
         if self.cachedir in [None, '']:
             self.has_cache = False
@@ -67,9 +69,20 @@ class Cache:
             except OSError:
                 bb.mkdirhier( self.cachedir )
 
-        if self.has_cache and (self.mtime(self.cachefile)):
+        if not self.has_cache:
+            return            
+
+        # If any of configuration.data's dependencies are newer than the
+        # cache there isn't even any point in loading it...
+        newest_mtime = 0
+        deps = bb.data.getVar("__depends", cooker.configuration.data, True)
+        for f,old_mtime in deps:
+            if old_mtime > newest_mtime:
+                newest_mtime = old_mtime
+
+        if self.mtime(self.cachefile) >= newest_mtime:
             try:
-                p = pickle.Unpickler( file(self.cachefile,"rb"))
+                p = pickle.Unpickler(file(self.cachefile, "rb"))
                 self.depends_cache, version_data = p.load()
                 if version_data['CACHE_VER'] != __cache_version__:
                     raise ValueError, 'Cache Version Mismatch'
@@ -81,11 +94,8 @@ class Cache:
             except (ValueError, KeyError):
                 bb.msg.note(1, bb.msg.domain.Cache, "Invalid cache found, rebuilding...")
                 self.depends_cache = {}
-
-        if self.depends_cache:
-            for fn in self.depends_cache.keys():
-                self.clean[fn] = ""
-                self.cacheValidUpdate(fn)
+        else:
+            bb.msg.note(1, bb.msg.domain.Cache, "Out of date cache found, rebuilding...")
 
     def getVar(self, var, fn, exp = 0):
         """
@@ -97,7 +107,6 @@ class Cache:
           2. We're learning what data to cache - serve from data 
              backend but add a copy of the data to the cache.
         """
-
         if fn in self.clean:
             return self.depends_cache[fn][var]
 
@@ -109,6 +118,7 @@ class Cache:
             # yet setData hasn't been called to setup the right access. Very bad.
             bb.msg.error(bb.msg.domain.Cache, "Parsing error data_fn %s and fn %s don't match" % (self.data_fn, fn))
 
+        self.cacheclean = False
         result = bb.data.getVar(var, self.data, exp)
         self.depends_cache[fn][var] = result
         return result
@@ -131,6 +141,8 @@ class Cache:
         Return a complete set of data for fn.
         To do this, we need to parse the file.
         """
+        bb.msg.debug(1, bb.msg.domain.Cache, "Parsing %s (full)" % fn)
+
         bb_data, skipped = self.load_bbfile(fn, cfgData)
         return bb_data
 
@@ -142,11 +154,15 @@ class Cache:
         to record the variables accessed.
         Return the cache status and whether the file was skipped when parsed
         """
+        if fn not in self.checked:
+            self.cacheValidUpdate(fn)
         if self.cacheValid(fn):
             if "SKIPPED" in self.depends_cache[fn]:
                 return True, True
             return True, False
 
+        bb.msg.debug(1, bb.msg.domain.Cache, "Parsing %s" % fn)
+
         bb_data, skipped = self.load_bbfile(fn, cfgData)
         self.setData(fn, bb_data)
         return False, skipped
@@ -172,11 +188,10 @@ class Cache:
         if not self.has_cache:
             return False
 
-        # Check file still exists
-        if self.mtime(fn) == 0:
-            bb.msg.debug(2, bb.msg.domain.Cache, "Cache: %s not longer exists" % fn)
-            self.remove(fn)
-            return False
+        self.checked[fn] = ""
+
+        # Pretend we're clean so getVar works
+        self.clean[fn] = ""
 
         # File isn't in depends_cache
         if not fn in self.depends_cache:
@@ -184,6 +199,12 @@ class Cache:
             self.remove(fn)
             return False
 
+        # Check file still exists
+        if self.mtime(fn) == 0:
+            bb.msg.debug(2, bb.msg.domain.Cache, "Cache: %s not longer exists" % fn)
+            self.remove(fn)
+            return False
+
         # Check the file's timestamp
         if bb.parse.cached_mtime(fn) > self.getVar("CACHETIMESTAMP", fn, True):
             bb.msg.debug(2, bb.msg.domain.Cache, "Cache: %s changed" % fn)
@@ -195,6 +216,7 @@ class Cache:
         for f,old_mtime in depends:
             # Check if file still exists
             if self.mtime(f) == 0:
+                self.remove(fn)
                 return False
 
             new_mtime = bb.parse.cached_mtime(f)
@@ -203,7 +225,7 @@ class Cache:
                 self.remove(fn)
                 return False
 
-        bb.msg.debug(2, bb.msg.domain.Cache, "Depends Cache: %s is clean" % fn)
+        #bb.msg.debug(2, bb.msg.domain.Cache, "Depends Cache: %s is clean" % fn)
         if not fn in self.clean:
             self.clean[fn] = ""
 
@@ -238,6 +260,10 @@ class Cache:
         if not self.has_cache:
             return
 
+        if self.cacheclean:
+            bb.msg.note(1, bb.msg.domain.Cache, "Cache is clean, not saving.")
+            return
+
         version_data = {}
         version_data['CACHE_VER'] = __cache_version__
         version_data['BITBAKE_VER'] = bb.__version__
@@ -264,7 +290,6 @@ class Cache:
         packages_dynamic = (self.getVar('PACKAGES_DYNAMIC', file_name, True) or "").split()
         rprovides = (self.getVar("RPROVIDES", file_name, True) or "").split()
 
-        cacheData.task_queues[file_name] = self.getVar("_task_graph", file_name, True)
         cacheData.task_deps[file_name] = self.getVar("_task_deps", file_name, True)
 
         # build PackageName to FileName lookup table
@@ -328,14 +353,16 @@ class Cache:
         if not file_name in cacheData.runrecs:
             cacheData.runrecs[file_name] = {}
 
+        rdepends = bb.utils.explode_deps(self.getVar('RDEPENDS', file_name, True) or "")
+        rrecommends = bb.utils.explode_deps(self.getVar('RRECOMMENDS', file_name, True) or "")
         for package in packages + [pn]:
             if not package in cacheData.rundeps[file_name]:
                 cacheData.rundeps[file_name][package] = {}
             if not package in cacheData.runrecs[file_name]:
                 cacheData.runrecs[file_name][package] = {}
 
-            add_dep(cacheData.rundeps[file_name][package], bb.utils.explode_deps(self.getVar('RDEPENDS', file_name, True) or ""))
-            add_dep(cacheData.runrecs[file_name][package], bb.utils.explode_deps(self.getVar('RRECOMMENDS', file_name, True) or ""))
+            add_dep(cacheData.rundeps[file_name][package], rdepends)
+            add_dep(cacheData.runrecs[file_name][package], rrecommends)
             add_dep(cacheData.rundeps[file_name][package], bb.utils.explode_deps(self.getVar("RDEPENDS_%s" % package, file_name, True) or ""))
             add_dep(cacheData.runrecs[file_name][package], bb.utils.explode_deps(self.getVar("RRECOMMENDS_%s" % package, file_name, True) or ""))
 
diff --git a/bitbake/lib/bb/cooker.py b/bitbake/lib/bb/cooker.py
index 2c091b65228afe526a8a4cdde11c5ec180db96fb..38a8209760b76356542065cb285e15f99decf97c 100644
@@ -97,14 +97,12 @@ class BBCooker:
             bb.msg.note(2, bb.msg.domain.Build, "Renice to %s " % os.nice(nice))
  
 
-    def tryBuildPackage(self, fn, item, task, the_data, build_depends):
+    def tryBuildPackage(self, fn, item, task, the_data):
         """
         Build one task of a package, optionally build following task depends
         """
         bb.event.fire(bb.event.PkgStarted(item, the_data))
         try:
-            if not build_depends:
-                bb.data.setVarFlag('do_%s' % task, 'dontrundeps', 1, the_data)
             if not self.configuration.dry_run:
                 bb.build.exec_task('do_%s' % task, the_data)
             bb.event.fire(bb.event.PkgSucceeded(item, the_data))
@@ -119,21 +117,20 @@ class BBCooker:
             bb.event.fire(bb.event.PkgFailed(item, the_data))
             raise
 
-    def tryBuild( self, fn, build_depends):
+    def tryBuild(self, fn):
         """
         Build a provider and its dependencies. 
         build_depends is a list of previous build dependencies (not runtime)
         If build_depends is empty, we're dealing with a runtime depends
         """
-
         the_data = self.bb_cache.loadDataFull(fn, self.configuration.data)
 
         item = self.status.pkg_fn[fn]
 
-        if bb.build.stamp_is_current('do_%s' % self.configuration.cmd, the_data):
-            return True
+        #if bb.build.stamp_is_current('do_%s' % self.configuration.cmd, the_data):
+        #    return True
 
-        return self.tryBuildPackage(fn, item, self.configuration.cmd, the_data, build_depends)
+        return self.tryBuildPackage(fn, item, self.configuration.cmd, the_data)
 
     def showVersions(self):
         pkg_pn = self.status.pkg_pn
@@ -184,6 +181,8 @@ class BBCooker:
             self.cb = None
             self.bb_cache = bb.cache.init(self)
             fn = self.matchFile(buildfile)
+            if not fn:
+                sys.exit(1)
         elif len(pkgs_to_build) == 1:
             self.updateCache()
 
@@ -220,7 +219,7 @@ class BBCooker:
         except Exception, e:
             bb.msg.fatal(bb.msg.domain.Parsing, "%s" % e)
         # emit the metadata which isnt valid shell
-        data.expandKeys( envdata )     
+        data.expandKeys( envdata )
         for e in envdata.keys():
             if data.getVarFlag( e, 'python', envdata ):
                 sys.__stdout__.write("\npython %s () {\n%s}\n" % (e, data.getVar(e, envdata, 1)))
@@ -273,7 +272,7 @@ class BBCooker:
             if fnid not in seen_fnids:
                 seen_fnids.append(fnid)
                 packages = []
-                print >> depends_file, '"%s" [label="%s %s\\n%s"]' % (pn, pn, version, fn)             
+                print >> depends_file, '"%s" [label="%s %s\\n%s"]' % (pn, pn, version, fn)
                 for depend in self.status.deps[fn]:
                     print >> depends_file, '"%s" -> "%s"' % (pn, depend)
                 rdepends = self.status.rundeps[fn]
@@ -387,19 +386,15 @@ class BBCooker:
         try:
             self.configuration.data = bb.parse.handle( afile, self.configuration.data )
 
-            # Add the handlers we inherited by INHERIT
-            # we need to do this manually as it is not guranteed
-            # we will pick up these classes... as we only INHERIT
-            # on .inc and .bb files but not on .conf
-            data = bb.data.createCopy( self.configuration.data )
-            inherits  = ["base"] + (bb.data.getVar('INHERIT', data, True ) or "").split()
+            # Handle any INHERITs and inherit the base class
+            inherits  = ["base"] + (bb.data.getVar('INHERIT', self.configuration.data, True ) or "").split()
             for inherit in inherits:
-                data = bb.parse.handle( os.path.join('classes', '%s.bbclass' % inherit ), data, True )
+                self.configuration.data = bb.parse.handle(os.path.join('classes', '%s.bbclass' % inherit), self.configuration.data, True )
 
-            # FIXME: This assumes that we included at least one .inc file
-            for var in bb.data.keys(data):
-                if bb.data.getVarFlag(var, 'handler', data):
-                    bb.event.register(var,bb.data.getVar(var, data))
+            # Normally we only register event handlers at the end of parsing .bb files
+            # We register any handlers we've found so far here...
+            for var in data.getVar('__BBHANDLERS', self.configuration.data) or []:
+                bb.event.register(var,bb.data.getVar(var, self.configuration.data))
 
             bb.fetch.fetcher_init(self.configuration.data)
 
@@ -463,30 +458,62 @@ class BBCooker:
                 bb.msg.error(bb.msg.domain.Parsing, "Unable to match %s (%s matches found):" % (buildfile, len(matches)))
                 for f in matches:
                     bb.msg.error(bb.msg.domain.Parsing, "    %s" % f)
-                sys.exit(1)
-            return matches[0]              
+                return False
+            return matches[0]
 
     def buildFile(self, buildfile):
         """
         Build the file matching regexp buildfile
         """
 
-        bf = self.matchFile(buildfile)
+        # Make sure our target is a fully qualified filename
+        fn = self.matchFile(buildfile)
+        if not fn:
+            return False
 
-        bbfile_data = bb.parse.handle(bf, self.configuration.data)
+        # Load data into the cache for fn
+        self.bb_cache = bb.cache.init(self)
+        self.bb_cache.loadData(fn, self.configuration.data)      
+
+        # Parse the loaded cache data
+        self.status = bb.cache.CacheData()
+        self.bb_cache.handle_data(fn, self.status)  
+
+        # Tweak some variables
+        item = self.bb_cache.getVar('PN', fn, True)
+        self.status.ignored_dependencies = Set()
+        self.status.bbfile_priority[fn] = 1
+
+        # Remove external dependencies
+        self.status.task_deps[fn]['depends'] = {}
+        self.status.deps[fn] = []
+        self.status.rundeps[fn] = []
+        self.status.runrecs[fn] = []
 
         # Remove stamp for target if force mode active
         if self.configuration.force:
-            bb.msg.note(2, bb.msg.domain.RunQueue, "Remove stamp %s, %s" % (self.configuration.cmd, bf))
+            bb.msg.note(2, bb.msg.domain.RunQueue, "Remove stamp %s, %s" % (self.configuration.cmd, fn))
             bb.build.del_stamp('do_%s' % self.configuration.cmd, bbfile_data)
 
-        item = bb.data.getVar('PN', bbfile_data, 1)
-        try:
-            self.tryBuildPackage(bf, item, self.configuration.cmd, bbfile_data, True)
-        except bb.build.EventException:
-            bb.msg.error(bb.msg.domain.Build,  "Build of '%s' failed" % item )
+        # Setup taskdata structure
+        taskdata = bb.taskdata.TaskData(self.configuration.abort)
+        taskdata.add_provider(self.configuration.data, self.status, item)
 
-        sys.exit(0)
+        buildname = bb.data.getVar("BUILDNAME", self.configuration.data)
+        bb.event.fire(bb.event.BuildStarted(buildname, [item], self.configuration.event_data))
+
+        # Execute the runqueue
+        runlist = [[item, "do_%s" % self.configuration.cmd]]
+        rq = bb.runqueue.RunQueue(self, self.configuration.data, self.status, taskdata, runlist)
+        rq.prepare_runqueue()
+        try:
+            failures = rq.execute_runqueue()
+        except runqueue.TaskFailure, fnids:
+            for fnid in fnids:
+                bb.msg.error(bb.msg.domain.Build, "'%s' failed" % taskdata.fn_index[fnid])
+            return False
+        bb.event.fire(bb.event.BuildCompleted(buildname, [item], self.configuration.event_data, failures))
+        return True
 
     def buildTargets(self, targets):
         """
@@ -568,7 +595,9 @@ class BBCooker:
             self.interactiveMode()
 
         if self.configuration.buildfile is not None:
-            return self.buildFile(self.configuration.buildfile)
+            if not self.buildFile(self.configuration.buildfile):
+                sys.exit(1)
+            sys.exit(0)
 
         # initialise the parsing status now we know we will need deps
         self.updateCache()
@@ -676,7 +705,7 @@ class BBCooker:
         for i in xrange( len( filelist ) ):
             f = filelist[i]
 
-            bb.msg.debug(1, bb.msg.domain.Collection, "parsing %s" % f)
+            #bb.msg.debug(1, bb.msg.domain.Collection, "parsing %s" % f)
 
             # read a file's metadata
             try:
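The rewritten buildFile above no longer parses the recipe and calls tryBuildPackage directly; a -b build now goes through the same taskdata/runqueue machinery as a normal build, which is what the "Convert -b option to use taskdata/runqueue" ChangeLog entry refers to. Condensed, the new flow is:

    # Condensed outline of the new BBCooker.buildFile (names as in the hunk above):
    #   1. fn = self.matchFile(buildfile)  - resolve to one fully qualified .bb file
    #   2. load just that file into a fresh CacheData and strip external dependencies
    #   3. build a TaskData and add the file's PN as the only provider
    #   4. run [[item, "do_<cmd>"]] through RunQueue.prepare_runqueue()/execute_runqueue()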
diff --git a/bitbake/lib/bb/data_smart.py b/bitbake/lib/bb/data_smart.py
index e879343f5dfa31e2963d0a9c7ad7f6f3c5da6083..b3a51b0edf6453d40172eb78506275b91e55acda 100644
@@ -232,10 +232,10 @@ class DataSmart:
         flags = {}
 
         if local_var:
-            for i in self.dict[var].keys():
+            for i in local_var.keys():
                 if i == "content":
                     continue
-                flags[i] = self.dict[var][i]
+                flags[i] = local_var[i]
 
         if len(flags) == 0:
             return None
diff --git a/bitbake/lib/bb/event.py b/bitbake/lib/bb/event.py
index 7148a2b7d6b7e1eb56a17cbc4dc8bc779f23ee14..c0a59e612008ac05e730b87b4239c5c48516e208 100644
@@ -127,6 +127,23 @@ def getName(e):
 class ConfigParsed(Event):
     """Configuration Parsing Complete"""
 
+class StampUpdate(Event):
+    """Trigger for any adjustment of the stamp files to happen"""
+
+    def __init__(self, targets, stampfns, d):
+        self._targets = targets
+        self._stampfns = stampfns
+        Event.__init__(self, d)
+
+    def getStampPrefix(self):
+        return self._stampfns
+
+    def getTargets(self):
+        return self._targets
+
+    stampPrefix = property(getStampPrefix)
+    targets = property(getTargets)
+
 class PkgBase(Event):
     """Base class for package events"""
 
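StampUpdate is fired by the run queue (see the runqueue.py hunk below) with the list of (recipe filename, task name) pairs being built and the per-file stamp prefixes, giving metadata a chance to adjust stamp files before their validity is checked. A hypothetical handler body might inspect it like this (a sketch only; registration still happens through the usual event-handler mechanism):

    def stampupdate_sketch(e):
        # e.targets     : list of (recipe filename, task name) pairs
        # e.stampPrefix : dict mapping recipe filename -> stamp file prefix
        for fn, task in e.targets:
            print "stamp for %s would be %s.%s" % (task, e.stampPrefix[fn], task)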
diff --git a/bitbake/lib/bb/fetch/__init__.py b/bitbake/lib/bb/fetch/__init__.py
index 700efcb4ac0e16f31ce21d91671d3c3355d5d150..4919b9d4730f3f15e6654ae859f6ccf3e71e63da 100644
@@ -139,13 +139,21 @@ def go(d):
             if not m.forcefetch(u, ud, d) and os.path.exists(ud.md5):
                 # File already present along with md5 stamp file
                 # Touch md5 file to show activity
-                os.utime(ud.md5, None)
+                try:
+                    os.utime(ud.md5, None)
+                except:
+                    # Errors aren't fatal here
+                    pass
                 continue
             lf = bb.utils.lockfile(ud.lockfile)
             if not m.forcefetch(u, ud, d) and os.path.exists(ud.md5):
                 # If someone else fetched this before we got the lock, 
                 # notice and don't try again
-                os.utime(ud.md5, None)
+                try:
+                    os.utime(ud.md5, None)
+                except:
+                    # Errors aren't fatal here
+                    pass
                 bb.utils.unlockfile(lf)
                 continue
         m.go(u, ud, d)
diff --git a/bitbake/lib/bb/parse/parse_py/BBHandler.py b/bitbake/lib/bb/parse/parse_py/BBHandler.py
index 2a30e5895ab56fcd20cc887a754ef8979b4f2857..d7bf6d4f372cfd5de5dfbde71891abb5f93cfd76 100644
@@ -95,6 +95,10 @@ def handle(fn, d, include = 0):
     if ext == ".bbclass":
         __classname__ = root
         classes.append(__classname__)
+        __inherit_cache = data.getVar('__inherit_cache', d) or []
+        if not fn in __inherit_cache:
+            __inherit_cache.append(fn)
+            data.setVar('__inherit_cache', __inherit_cache, d)
 
     if include != 0:
         oldfile = data.getVar('FILE', d)
@@ -126,10 +130,6 @@ def handle(fn, d, include = 0):
 
     if ext != ".bbclass":
         data.setVar('FILE', fn, d)
-        i = (data.getVar("INHERIT", d, 1) or "").split()
-        if not "base" in i and __classname__ != "base":
-            i[0:0] = ["base"]
-        inherit(i, d)
 
     lineno = 0
     while 1:
@@ -171,33 +171,12 @@ def handle(fn, d, include = 0):
             all_handlers = {} 
             for var in data.getVar('__BBHANDLERS', d) or []:
                 # try to add the handler
-                # if we added it remember the choiche
                 handler = data.getVar(var,d)
-                if bb.event.register(var,handler) == bb.event.Registered:
-                    all_handlers[var] = handler
-
-            tasklist = {}
-            for var in data.getVar('__BBTASKS', d) or []:
-                if var not in tasklist:
-                    tasklist[var] = []
-                deps = data.getVarFlag(var, 'deps', d) or []
-                for p in deps:
-                    if p not in tasklist[var]:
-                        tasklist[var].append(p)
-
-                postdeps = data.getVarFlag(var, 'postdeps', d) or []
-                for p in postdeps:
-                    if p not in tasklist:
-                        tasklist[p] = []
-                    if var not in tasklist[p]:
-                        tasklist[p].append(var)
+                bb.event.register(var, handler)
 
+            tasklist = data.getVar('__BBTASKS', d) or []
             bb.build.add_tasks(tasklist, d)
 
-            # now add the handlers
-            if not len(all_handlers) == 0:
-                data.setVar('__all_handlers__', all_handlers, d)
-
         bbpath.pop(0)
     if oldfile:
         bb.data.setVar("FILE", oldfile, d)
@@ -342,15 +321,23 @@ def feeder(lineno, s, fn, root, d):
         data.setVarFlag(var, "task", 1, d)
 
         bbtasks = data.getVar('__BBTASKS', d) or []
-        bbtasks.append(var)
+        if not var in bbtasks:
+            bbtasks.append(var)
         data.setVar('__BBTASKS', bbtasks, d)
 
+        existing = data.getVarFlag(var, "deps", d) or []
         if after is not None:
-#           set up deps for function
-            data.setVarFlag(var, "deps", after.split(), d)
+            # set up deps for function
+            for entry in after.split():
+                if entry not in existing:
+                    existing.append(entry)
+        data.setVarFlag(var, "deps", existing, d)
         if before is not None:
-#           set up things that depend on this func
-            data.setVarFlag(var, "postdeps", before.split(), d)
+            # set up things that depend on this func
+            for entry in before.split():
+                existing = data.getVarFlag(entry, "deps", d) or []
+                if var not in existing:
+                    data.setVarFlag(entry, "deps", [var] + existing, d)
         return
 
     m = __addhandler_regexp__.match(s)
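The feeder changes above make "addtask ... before ..." write straight into the other task's 'deps' flag instead of recording a separate 'postdeps' flag, so the parser now produces exactly the per-task dependency lists that build.add_tasks consumes. Illustrative effect, assuming do_compile and do_build had no earlier deps:

    # addtask do_compile after do_configure before do_build
    # leaves the flags roughly as:
    #   data.getVarFlag('do_compile', 'deps', d) == ['do_configure']
    #   data.getVarFlag('do_build',   'deps', d) == ['do_compile']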
diff --git a/bitbake/lib/bb/runqueue.py b/bitbake/lib/bb/runqueue.py
index 9d72d92fac2f58695fd34abfcd47abfd00256f19..2765343a3e7756223f47d8b26129b093a02729ea 100644
@@ -26,6 +26,7 @@ from bb import msg, data, event, mkdirhier, utils
 from sets import Set 
 import bb, os, sys
 import signal
+import stat
 
 class TaskFailure(Exception):
     """Exception raised when a task in a runqueue fails"""
@@ -45,11 +46,11 @@ class RunQueueStats:
     def taskFailed(self):
         self.failed = self.failed + 1
 
-    def taskCompleted(self):
-        self.completed = self.completed + 1
+    def taskCompleted(self, number = 1):
+        self.completed = self.completed + number
 
-    def taskSkipped(self):
-        self.skipped = self.skipped + 1
+    def taskSkipped(self, number = 1):
+        self.skipped = self.skipped + number
 
 class RunQueueScheduler:
     """
@@ -144,8 +145,11 @@ class RunQueue:
         self.taskData = taskData
         self.targets = targets
 
-        self.number_tasks = int(bb.data.getVar("BB_NUMBER_THREADS", cfgData) or 1)
-        self.multi_provider_whitelist = (bb.data.getVar("MULTI_PROVIDER_WHITELIST", cfgData) or "").split()
+        self.cfgdata = cfgData
+        self.number_tasks = int(bb.data.getVar("BB_NUMBER_THREADS", cfgData, 1) or 1)
+        self.multi_provider_whitelist = (bb.data.getVar("MULTI_PROVIDER_WHITELIST", cfgData, 1) or "").split()
+        self.scheduler = bb.data.getVar("BB_SCHEDULER", cfgData, 1) or "speed"
+        self.stamppolicy = bb.data.getVar("BB_STAMP_POLICY", cfgData, 1) or "perfile"
 
     def reset_runqueue(self):
 
@@ -512,6 +516,7 @@ class RunQueue:
             for depend in depends:
                 mark_active(depend, depth+1)
 
+        self.target_pairs = []
         for target in self.targets:
             targetid = taskData.getbuild_id(target[0])
 
@@ -522,10 +527,11 @@ class RunQueue:
                 continue
 
             fnid = taskData.build_targets[targetid][0]
+            fn = taskData.fn_index[fnid]
+            self.target_pairs.append((fn, target[1]))
 
             # Remove stamps for targets if force mode active
             if self.cooker.configuration.force:
-                fn = taskData.fn_index[fnid]
                 bb.msg.note(2, bb.msg.domain.RunQueue, "Remove stamp %s, %s" % (target[1], fn))
                 bb.build.del_stamp(target[1], self.dataCache, fn)
 
@@ -608,10 +614,11 @@ class RunQueue:
         self.runq_weight = self.calculate_task_weights(endpoints)
 
         # Decide what order to execute the tasks in, pick a scheduler
-        # FIXME - Allow user selection
         #self.sched = RunQueueScheduler(self)
-        self.sched = RunQueueSchedulerSpeed(self)
-        #self.sched = RunQueueSchedulerCompletion(self)
+        if self.scheduler == "completion":
+            self.sched = RunQueueSchedulerCompletion(self)
+        else:
+            self.sched = RunQueueSchedulerSpeed(self)
 
         # Sanity Check - Check for multiple tasks building the same provider
         prov_list = {}
@@ -636,6 +643,93 @@ class RunQueue:
 
         #self.dump_data(taskData)
 
+    def check_stamps(self):
+        unchecked = {}
+        current = []
+        notcurrent = []
+        buildable = []
+
+        if self.stamppolicy == "perfile":
+            fulldeptree = False
+        else:
+            fulldeptree = True
+
+        for task in range(len(self.runq_fnid)):
+            unchecked[task] = ""
+            if len(self.runq_depends[task]) == 0:
+                buildable.append(task)
+
+        for task in range(len(self.runq_fnid)):
+            if task not in unchecked:
+                continue
+            fn = self.taskData.fn_index[self.runq_fnid[task]]
+            taskname = self.runq_task[task]
+            stampfile = "%s.%s" % (self.dataCache.stamp[fn], taskname)
+            # If the stamp is missing its not current
+            if not os.access(stampfile, os.F_OK):
+                del unchecked[task]
+                notcurrent.append(task)
+                continue
+            # If its a 'nostamp' task, it's not current
+            taskdep = self.dataCache.task_deps[fn]
+            if 'nostamp' in taskdep and task in taskdep['nostamp']:
+                del unchecked[task]
+                notcurrent.append(task)
+                continue
+
+        while (len(buildable) > 0):
+            nextbuildable = []
+            for task in buildable:
+                if task in unchecked:
+                    fn = self.taskData.fn_index[self.runq_fnid[task]]
+                    taskname = self.runq_task[task]
+                    stampfile = "%s.%s" % (self.dataCache.stamp[fn], taskname)
+                    iscurrent = True
+
+                    t1 = os.stat(stampfile)[stat.ST_MTIME]
+                    for dep in self.runq_depends[task]:
+                        if iscurrent:
+                            fn2 = self.taskData.fn_index[self.runq_fnid[dep]]
+                            taskname2 = self.runq_task[dep]
+                            stampfile2 = "%s.%s" % (self.dataCache.stamp[fn2], taskname2)
+                            if fulldeptree or fn == fn2:
+                                if dep in notcurrent:
+                                    iscurrent = False
+                                else:
+                                    t2 = os.stat(stampfile2)[stat.ST_MTIME]
+                                    if t1 < t2:
+                                        iscurrent = False
+                    del unchecked[task]
+                    if iscurrent:
+                        current.append(task)
+                    else:
+                        notcurrent.append(task)
+
+                for revdep in self.runq_revdeps[task]:
+                    alldeps = 1
+                    for dep in self.runq_depends[revdep]:
+                        if dep in unchecked:
+                            alldeps = 0
+                    if alldeps == 1:
+                        if revdep in unchecked:
+                            nextbuildable.append(revdep)
+
+            buildable = nextbuildable
+
+        #for task in range(len(self.runq_fnid)):
+        #    fn = self.taskData.fn_index[self.runq_fnid[task]]
+        #    taskname = self.runq_task[task]
+        #    print "%s %s.%s" % (task, taskname, fn)
+
+        #print "Unchecked: %s" % unchecked
+        #print "Current: %s" % current
+        #print "Not current: %s" % notcurrent
+
+        if len(unchecked) > 0:
+            bb.fatal("check_stamps fatal internal error")
+        return current
+
+
     def execute_runqueue(self):
         """
         Run the tasks in a queue prepared by prepare_runqueue
@@ -721,18 +815,13 @@ class RunQueue:
         def sigint_handler(signum, frame):
             raise KeyboardInterrupt
 
-        # RP - this code allows tasks to run out of the correct order - disabled, FIXME
-        # Find any tasks with current stamps and remove them from the queue
-        #for task1 in range(len(self.runq_fnid)):
-        #    task = self.prio_map[task1]
-        #    fn = self.taskData.fn_index[self.runq_fnid[task]]
-        #    taskname = self.runq_task[task]
-        #    if bb.build.stamp_is_current(taskname, self.dataCache, fn):
-        #        bb.msg.debug(2, bb.msg.domain.RunQueue, "Stamp current task %s (%s)" % (task, self.get_user_idstring(task)))
-        #        self.runq_running[task] = 1
-        #        self.task_complete(task)
-        #        self.stats.taskCompleted()
-        #        self.stats.taskSkipped()
+        event.fire(bb.event.StampUpdate(self.target_pairs, self.dataCache.stamp, self.cfgdata))
+
+        # Find out which tasks have current stamps which we can skip when the
+        # time comes
+        currentstamps = self.check_stamps()
+        self.stats.taskSkipped(len(currentstamps))
+        self.stats.taskCompleted(len(currentstamps))
 
         while True:
             task = self.sched.next()
@@ -740,12 +829,13 @@ class RunQueue:
                 fn = self.taskData.fn_index[self.runq_fnid[task]]
 
                 taskname = self.runq_task[task]
-                if bb.build.stamp_is_current(taskname, self.dataCache, fn):
+                if task in currentstamps:
+                #if bb.build.stamp_is_current(taskname, self.dataCache, fn):
                     bb.msg.debug(2, bb.msg.domain.RunQueue, "Stamp current task %s (%s)" % (task, self.get_user_idstring(task)))
                     self.runq_running[task] = 1
                     self.task_complete(task)
-                    self.stats.taskCompleted()
-                    self.stats.taskSkipped()
+                    #self.stats.taskCompleted()
+                    #self.stats.taskSkipped()
                     continue
 
                 bb.msg.note(1, bb.msg.domain.RunQueue, "Running task %d of %d (ID: %s, %s)" % (self.stats.completed + self.active_builds + 1, len(self.runq_fnid), task, self.get_user_idstring(task)))
@@ -764,7 +854,7 @@ class RunQueue:
                     os.dup2(newsi, sys.stdin.fileno())
                     self.cooker.configuration.cmd = taskname[3:]
                     try: 
-                        self.cooker.tryBuild(fn, False)
+                        self.cooker.tryBuild(fn)
                     except bb.build.EventException:
                         bb.msg.error(bb.msg.domain.Build, "Build of " + fn + " " + taskname + " failed")
                         sys.exit(1)
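check_stamps above walks the run queue in dependency order and marks a task current only if its stamp file exists, it is not a 'nostamp' task, none of the compared dependencies is itself out of date, and its stamp is at least as new as theirs; with the default BB_STAMP_POLICY of "perfile" only dependencies from the same recipe are compared, with "full" all of them are. The per-task mtime comparison boils down to this hypothetical helper (not part of the runqueue API):

    import os, stat

    def stamp_newer_than_deps(stampfile, dep_stampfiles):
        if not os.access(stampfile, os.F_OK):
            return False
        t1 = os.stat(stampfile)[stat.ST_MTIME]
        for depstamp in dep_stampfiles:
            if os.stat(depstamp)[stat.ST_MTIME] > t1:
                return False
        return True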
diff --git a/bitbake/lib/bb/shell.py b/bitbake/lib/bb/shell.py
index 745091fb7df8053f569cb6bb0e4df606d923befd..feba3f2b442340421e115a20e1db8e5fc03fbb83 100644
@@ -243,27 +243,13 @@ class BitBakeShellCommands:
         oldcmd = cooker.configuration.cmd
         cooker.configuration.cmd = cmd
 
-        thisdata = data.createCopy(cooker.configuration.data)
-        data.update_data(thisdata)
-        data.expandKeys(thisdata)
-
         try:
-            bbfile_data = parse.handle( bf, thisdata )
+            cooker.buildFile(bf)
         except parse.ParseError:
             print "ERROR: Unable to open or parse '%s'" % bf
-        else:
-            # Remove stamp for target if force mode active
-            if cooker.configuration.force:
-                bb.msg.note(2, bb.msg.domain.RunQueue, "Remove stamp %s, %s" % (cmd, bf))
-                bb.build.del_stamp('do_%s' % cmd, bbfile_data)
-
-            item = data.getVar('PN', bbfile_data, 1)
-            data.setVar( "_task_cache", [], bbfile_data ) # force
-            try:
-                cooker.tryBuildPackage( os.path.abspath( bf ), item, cmd, bbfile_data, True )
-            except build.EventException, e:
-                print "ERROR: Couldn't build '%s'" % name
-                last_exception = e
+        except build.EventException, e:
+            print "ERROR: Couldn't build '%s'" % name
+            last_exception = e
 
         cooker.configuration.cmd = oldcmd
     fileBuild.usage = "<bbfile>"
@@ -586,6 +572,7 @@ SRC_URI = ""
 
 def completeFilePath( bbfile ):
     """Get the complete bbfile path"""
+    if not cooker.status: return bbfile
     if not cooker.status.pkg_fn: return bbfile
     for key in cooker.status.pkg_fn.keys():
         if key.endswith( bbfile ):
diff --git a/bitbake/lib/bb/taskdata.py b/bitbake/lib/bb/taskdata.py
index 3dac6c26afd07988e4a9f688f4a312d67ed82687..4a79e7a56d4b4efd7055f319592c89ddce0cc0d2 100644
@@ -124,7 +124,6 @@ class TaskData:
         Add tasks for a given fn to the database
         """
 
-        task_graph = dataCache.task_queues[fn]
         task_deps = dataCache.task_deps[fn]
 
         fnid = self.getfn_id(fn)
@@ -136,11 +135,11 @@ class TaskData:
         if fnid in self.tasks_fnid:
             return
 
-        for task in task_graph.allnodes():
+        for task in task_deps['tasks']:
 
             # Work out task dependencies
             parentids = []
-            for dep in task_graph.getparents(task):
+            for dep in task_deps['parents'][task]:
                 parentid = self.gettask_id(fn, dep)
                 parentids.append(parentid)
             taskid = self.gettask_id(fn, task)
diff --git a/bitbake/lib/bb/utils.py b/bitbake/lib/bb/utils.py
index a2a5ff6cfd353fd7edcc38c44b56842a87f0b573..9702c8c2042288119b731a7b7556c7b72890affd 100644
@@ -85,11 +85,11 @@ def explode_deps(s):
     for i in l:
         if i[0] == '(':
             flag = True
-            j = []
-        if flag:
-            j.append(i)
-        else:
+            #j = []
+        if not flag:
             r.append(i)
+        #else:
+        #    j.append(i)
         if flag and i.endswith(')'):
             flag = False
             # Ignore version
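The explode_deps change only removes the dead code that collected version constraints into the unused list j; the result is unchanged, with parenthesised version expressions skipped and only the dependency names returned. For instance (illustrative call):

    # bb.utils.explode_deps("update-rc.d glibc (>= 2.5) virtual/libc")
    # -> ['update-rc.d', 'glibc', 'virtual/libc']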