bitbake: Sync with 1.8.8 release
author     Richard Purdie <richard@openedhand.com>
           Mon, 20 Aug 2007 07:48:43 +0000 (07:48 +0000)
committer  Richard Purdie <richard@openedhand.com>
           Mon, 20 Aug 2007 07:48:43 +0000 (07:48 +0000)
git-svn-id: https://svn.o-hand.com/repos/poky/trunk@2513 311d38ba-8fff-0310-9ca6-ca027cbcb966

bitbake/ChangeLog
bitbake/bin/bitbake
bitbake/lib/bb/__init__.py
bitbake/lib/bb/build.py
bitbake/lib/bb/cooker.py
bitbake/lib/bb/data.py
bitbake/lib/bb/fetch/__init__.py
bitbake/lib/bb/fetch/svn.py
bitbake/lib/bb/parse/parse_py/BBHandler.py
bitbake/lib/bb/runqueue.py
bitbake/lib/bb/taskdata.py

diff --git a/bitbake/ChangeLog b/bitbake/ChangeLog
index 878afadb439f6f876f96acbb7d2bbc917069fb24..7b99a1b054bc46cf527c6041d5c3adb86ce4de1e 100644
@@ -1,4 +1,6 @@
-Changes in Bitbake 1.8.x:
+Changes in BitBake 1.8.x:
+
+Changes in Bitbake 1.8.8:
        - Rewrite svn fetcher to make adding extra operations easier 
          as part of future SRCDATE="now" fixes
          (requires new FETCHCMD_svn definition in bitbake.conf)
@@ -25,7 +27,15 @@ Changes in Bitbake 1.8.x:
        - Fix int(0)/None confusion in runqueue.py which causes random gaps in dependency chains
        - Fix handling of variables with expansion in the name using _append/_prepend
          e.g. RRECOMMENDS_${PN}_append_xyz = "abc"
-       
+       - Expand data in addtasks
+       - Print the list of missing DEPENDS,RDEPENDS for the "No buildable providers available for required...."
+         error message.
+       - Rework add_task to be more efficient (6% speedup, 7% number of function calls reduction)
+       - Sort digraph output to make builds more reproducible
+       - Split expandKeys into two for loops to benefit from the expand_cache (12% speedup)
+       - runqueue.py: Fix idepends handling to avoid dependency errors
+       - Clear the terminal TOSTOP flag if set (and warn the user)
+       - Fix regression from r653 and make SRCDATE/CVSDATE work for packages again
 
 Changes in Bitbake 1.8.6:
        - Correctly redirect stdin when forking
diff --git a/bitbake/bin/bitbake b/bitbake/bin/bitbake
index 8b69a0a33fb55771cdef58eecc096f56a837c449..53185e58c68e864aa16ab7bf85e846a4ae45c5c6 100755
@@ -27,7 +27,7 @@ sys.path.insert(0,os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'l
 import bb
 from bb import cooker
 
-__version__ = "1.8.7"
+__version__ = "1.8.9"
 
 #============================================================================#
 # BBOptions
diff --git a/bitbake/lib/bb/__init__.py b/bitbake/lib/bb/__init__.py
index 585eec88752b9bec0ab4e4589eea8299dc7fc4a2..77b1255c77baff7ace28bf0919c16c5baae8ac7f 100644
@@ -21,7 +21,7 @@
 # with this program; if not, write to the Free Software Foundation, Inc.,
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 
-__version__ = "1.8.7"
+__version__ = "1.8.9"
 
 __all__ = [
 
@@ -1124,7 +1124,12 @@ class digraph:
 
     def allnodes(self):
         "returns all nodes in the dictionary"
-        return self.dict.keys()
+        keys = self.dict.keys()
+        ret = []
+        for key in keys:
+            ret.append(key)
+        ret.sort()
+        return ret
 
     def firstzero(self):
         "returns first node with zero references, or NULL if no such node exists"
@@ -1168,7 +1173,12 @@ class digraph:
     def getparents(self, item):
         if not self.hasnode(item):
             return []
-        return self.dict[item][1]
+        parents = self.dict[item][1]
+        ret = []
+        for parent in parents:
+            ret.append(parent)
+        ret.sort()
+        return ret
 
     def getchildren(self, item):
         if not self.hasnode(item):
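
The digraph hunk above is the "Sort digraph output" ChangeLog entry: allnodes() and getparents() now return sorted copies instead of handing out the dictionary's own key view and parent list, so anything walking the graph sees a deterministic order. A minimal standalone sketch of the same idea (hypothetical helper; the node layout is assumed from the hunk, where element 1 of each value is the parent list):

    # Hypothetical stand-in for bb.digraph nodes.  Returning a sorted copy
    # gives callers a stable, reproducible order and avoids exposing the
    # graph's internal key view.
    def sorted_nodes(graph):
        nodes = list(graph.keys())
        nodes.sort()
        return nodes

    graph = {"do_compile": [0, ["do_unpack"]],
             "do_unpack":  [0, ["do_fetch"]],
             "do_fetch":   [0, []]}
    print(sorted_nodes(graph))   # ['do_compile', 'do_fetch', 'do_unpack']
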
diff --git a/bitbake/lib/bb/build.py b/bitbake/lib/bb/build.py
index bcbc55eea5011e87a900e75b0c2a10607a6522f3..e9a6fc8c61b30dc84d35ec11a0d699314f19c122 100644
@@ -397,35 +397,41 @@ def del_stamp(task, d, file_name = None):
     """
     stamp_internal(task, d, file_name)
 
-def add_task(task, deps, d):
+def add_tasks(tasklist, d):
     task_graph = data.getVar('_task_graph', d)
+    task_deps = data.getVar('_task_deps', d)
     if not task_graph:
         task_graph = bb.digraph()
-    data.setVarFlag(task, 'task', 1, d)
-    task_graph.addnode(task, None)
-    for dep in deps:
-        if not task_graph.hasnode(dep):
-            task_graph.addnode(dep, None)
-        task_graph.addnode(task, dep)
-    # don't assume holding a reference
-    data.setVar('_task_graph', task_graph, d)
-
-    task_deps = data.getVar('_task_deps', d)
     if not task_deps:
         task_deps = {}
-    def getTask(name):
-        deptask = data.getVarFlag(task, name, d)
-        if deptask:
-            deptask = data.expand(deptask, d)
-            if not name in task_deps:
-                task_deps[name] = {}
-            task_deps[name][task] = deptask
-    getTask('depends')
-    getTask('deptask')
-    getTask('rdeptask')
-    getTask('recrdeptask')
-    getTask('nostamp')
 
+    for task in tasklist:
+        deps = tasklist[task]
+        task = data.expand(task, d)
+
+        data.setVarFlag(task, 'task', 1, d)
+        task_graph.addnode(task, None)
+        for dep in deps:
+            dep = data.expand(dep, d)
+            if not task_graph.hasnode(dep):
+                task_graph.addnode(dep, None)
+            task_graph.addnode(task, dep)
+
+        flags = data.getVarFlags(task, d)    
+        def getTask(name):
+            if name in flags:
+                deptask = data.expand(flags[name], d)
+                if not name in task_deps:
+                    task_deps[name] = {}
+                task_deps[name][task] = deptask
+        getTask('depends')
+        getTask('deptask')
+        getTask('rdeptask')
+        getTask('recrdeptask')
+        getTask('nostamp')
+
+    # don't assume holding a reference
+    data.setVar('_task_graph', task_graph, d)
     data.setVar('_task_deps', task_deps, d)
 
 def remove_task(task, kill, d):
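
The per-task add_task() calls are folded into a single add_tasks() here: the whole task list is expanded and registered in one pass, and flags are read once via getVarFlags() instead of five separate getVarFlag() lookups, which is where the quoted 6% speedup comes from. A rough sketch of the tasklist structure the new function expects (hypothetical task names; the datastore call is left commented out):

    # Hypothetical tasklist mapping each task to the tasks it depends on;
    # BBHandler builds this shape and hands it to bb.build.add_tasks(tasklist, d).
    tasklist = {
        "do_fetch":   [],
        "do_unpack":  ["do_fetch"],
        "do_compile": ["do_unpack"],
    }

    for task in tasklist:
        for dep in tasklist[task]:
            print("edge: %s -> %s" % (dep, task))
    # bb.build.add_tasks(tasklist, d)   # d being the usual BitBake datastore
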
diff --git a/bitbake/lib/bb/cooker.py b/bitbake/lib/bb/cooker.py
index 955fbb434c81838ba61d1903286da765a95745f1..0eda9eed99c8a455a369477392f464b76ffa7aa9 100644
@@ -73,6 +73,19 @@ class BBCooker:
         self.configuration.event_data = bb.data.createCopy(self.configuration.data)
         bb.data.update_data(self.configuration.event_data)
 
+        #
+        # TOSTOP must not be set or our children will hang when they output
+        #
+        fd = sys.stdout.fileno()
+        if os.isatty(fd):
+            import termios
+            tcattr = termios.tcgetattr(fd)
+            if tcattr[3] & termios.TOSTOP:
+                bb.msg.note(1, bb.msg.domain.Build, "The terminal had the TOSTOP bit set, clearing...")
+                tcattr[3] = tcattr[3] & ~termios.TOSTOP
+                termios.tcsetattr(fd, termios.TCSANOW, tcattr)
+
+
     def tryBuildPackage(self, fn, item, task, the_data, build_depends):
         """
         Build one task of a package, optionally build following task depends
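
The block added to BBCooker clears the terminal's TOSTOP flag so that child processes writing to stdout are not stopped with SIGTTOU. The same check can be run standalone; a sketch assuming a POSIX tty, with bb.msg swapped for a plain print:

    import os
    import sys
    import termios

    fd = sys.stdout.fileno()
    if os.isatty(fd):
        attrs = termios.tcgetattr(fd)
        # attrs[3] holds the local-mode flags; TOSTOP makes background writers
        # receive SIGTTOU and hang, which is what the cooker wants to avoid.
        if attrs[3] & termios.TOSTOP:
            print("The terminal had the TOSTOP bit set, clearing...")
            attrs[3] = attrs[3] & ~termios.TOSTOP
            termios.tcsetattr(fd, termios.TCSANOW, attrs)
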
diff --git a/bitbake/lib/bb/data.py b/bitbake/lib/bb/data.py
index 21cdde04a80f0a04743799fbebac437b914a3be5..7ad1acad1ceed676e323cd395dbd52b132e3d63b 100644
@@ -282,6 +282,7 @@ def expandKeys(alterdata, readdata = None):
     if readdata == None:
         readdata = alterdata
 
+    todolist = {}
     for key in keys(alterdata):
         if not '${' in key:
             continue
@@ -289,7 +290,13 @@ def expandKeys(alterdata, readdata = None):
         ekey = expand(key, readdata)
         if key == ekey:
             continue
+        todolist[key] = ekey
 
+    # These two for loops are split for performance to maximise the 
+    # usefulness of the expand cache
+
+    for key in todolist:
+        ekey = todolist[key]
         renameVar(key, ekey, alterdata)
 
 def expandData(alterdata, readdata = None):
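
Splitting expandKeys() into two loops means every key is expanded before any renameVar() call mutates the store, so the expand cache stays useful for the whole first pass; that is the 12% speedup noted in the ChangeLog. A toy two-pass sketch with hypothetical expand/rename stand-ins rather than the real bb.data functions:

    # Gather all key renames first, then apply them; interleaving the rename
    # with the expansion would keep invalidating a cache keyed on variable names.
    def expand_keys(store, expand):
        todolist = {}
        for key in list(store):
            ekey = expand(key, store)
            if ekey != key:
                todolist[key] = ekey
        for key in todolist:
            store[todolist[key]] = store.pop(key)   # stand-in for renameVar()

    store = {"PN": "foo", "RRECOMMENDS_${PN}": "abc"}
    expand_keys(store, lambda key, s: key.replace("${PN}", s["PN"]))
    print(store)   # {'PN': 'foo', 'RRECOMMENDS_foo': 'abc'}
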
diff --git a/bitbake/lib/bb/fetch/__init__.py b/bitbake/lib/bb/fetch/__init__.py
index bbff516ffc0561a041fc95b1a4b5b360576c6a0b..c34405738bd5934918debde6746589faeffc0a73 100644
@@ -135,26 +135,27 @@ def go(d):
     for u in urldata:
         ud = urldata[u]
         m = ud.method
-        if ud.localfile and not m.forcefetch(u, ud, d) and os.path.exists(ud.md5):
-            # File already present along with md5 stamp file
-            # Touch md5 file to show activity
-            os.utime(ud.md5, None)
-            continue
-        lf = open(ud.lockfile, "a+")
-        fcntl.flock(lf.fileno(), fcntl.LOCK_EX)
-        if ud.localfile and not m.forcefetch(u, ud, d) and os.path.exists(ud.md5):
-            # If someone else fetched this before we got the lock, 
-            # notice and don't try again
-            os.utime(ud.md5, None)
+        if ud.localfile:
+            if not m.forcefetch(u, ud, d) and os.path.exists(ud.md5):
+                # File already present along with md5 stamp file
+                # Touch md5 file to show activity
+                os.utime(ud.md5, None)
+                continue
+            lf = open(ud.lockfile, "a+")
+            fcntl.flock(lf.fileno(), fcntl.LOCK_EX)
+            if not m.forcefetch(u, ud, d) and os.path.exists(ud.md5):
+                # If someone else fetched this before we got the lock, 
+                # notice and don't try again
+                os.utime(ud.md5, None)
+                fcntl.flock(lf.fileno(), fcntl.LOCK_UN)
+                lf.close
+                continue
+        m.go(u, ud, d)
+        if ud.localfile:
+            if not m.forcefetch(u, ud, d):
+                Fetch.write_md5sum(u, ud, d)
             fcntl.flock(lf.fileno(), fcntl.LOCK_UN)
             lf.close
-            continue
-        m.go(u, ud, d)
-        if ud.localfile and not m.forcefetch(u, ud, d):
-            Fetch.write_md5sum(u, ud, d)
-        fcntl.flock(lf.fileno(), fcntl.LOCK_UN)
-        lf.close
-
 
 def localpaths(d):
     """
@@ -339,7 +340,7 @@ class Fetch(object):
         pn = data.getVar("PN", d, 1)
 
         if pn:
-            return data.getVar("SRCDATE_%s" % pn, d, 1) or data.getVar("CVSDATE_%s" % pn, d, 1) or data.getVar("DATE", d, 1)
+            return data.getVar("SRCDATE_%s" % pn, d, 1) or data.getVar("CVSDATE_%s" % pn, d, 1) or data.getVar("SRCDATE", d, 1) or data.getVar("CVSDATE", d, 1) or data.getVar("DATE", d, 1)
 
         return data.getVar("SRCDATE", d, 1) or data.getVar("CVSDATE", d, 1) or data.getVar("DATE", d, 1)
     getSRCDate = staticmethod(getSRCDate)
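
The getSRCDate() change restores the global SRCDATE/CVSDATE fallback even when PN is set, so a per-package date is preferred but no longer mandatory; this is the "make SRCDATE/CVSDATE work for packages again" ChangeLog entry. A small sketch of the resulting lookup order (hypothetical variable store, not the real bb.data API):

    # Hypothetical lookup order for package 'foo' after the fix:
    # SRCDATE_foo, CVSDATE_foo, SRCDATE, CVSDATE, then DATE.
    def get_src_date(store, pn):
        for name in ("SRCDATE_%s" % pn, "CVSDATE_%s" % pn,
                     "SRCDATE", "CVSDATE", "DATE"):
            if store.get(name):
                return store[name]
        return None

    print(get_src_date({"SRCDATE": "20070820", "DATE": "20070821"}, "foo"))   # 20070820
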
diff --git a/bitbake/lib/bb/fetch/svn.py b/bitbake/lib/bb/fetch/svn.py
index ca12efe158d9d8491938f70fad53fd642649cc14..af8543ab341f089a6ddd6e01e892e4862de73361 100644
@@ -74,11 +74,14 @@ class Svn(Fetch):
                 ud.revision = ""
             else:
                 rev = data.getVar("SRCREV", d, 0)
-                if "get_srcrev" in rev:
+                if rev and "get_srcrev" in rev:
                     ud.revision = self.latest_revision(url, ud, d)
-                else:
+                    ud.date = ""
+                elif rev:
                     ud.revision = rev
-                ud.date = ""
+                    ud.date = ""
+                else:
+                    ud.revision = ""           
 
         ud.localfile = data.expand('%s_%s_%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision, ud.date), d)
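
The svn fetcher hunk guards against SRCREV being unset: the old code evaluated '"get_srcrev" in rev' even when rev was None, which raises a TypeError. A sketch of the resulting three-way choice, with latest_revision() reduced to a hypothetical stand-in:

    # Hypothetical reduction of the branch above: a "get_srcrev" SRCREV resolves
    # to the latest repository revision, a concrete SRCREV is used as-is, and no
    # SRCREV at all means no revision.
    def pick_revision(rev, latest_revision):
        if rev and "get_srcrev" in rev:
            return latest_revision()
        elif rev:
            return rev
        return ""

    def latest():
        return "1234"

    print(pick_revision(None, latest))          # ''
    print(pick_revision("get_srcrev", latest))  # '1234'
    print(pick_revision("500", latest))         # '500'
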
 
diff --git a/bitbake/lib/bb/parse/parse_py/BBHandler.py b/bitbake/lib/bb/parse/parse_py/BBHandler.py
index aaa262d3e2a744ffb002729ea510852a0cfc4c31..0f19f9a5d54721387685209ea9bb01918cba19d8 100644
@@ -176,15 +176,23 @@ def handle(fn, d, include = 0):
                 if bb.event.register(var,handler) == bb.event.Registered:
                     all_handlers[var] = handler
 
+            tasklist = {}
             for var in data.getVar('__BBTASKS', d) or []:
+                if var not in tasklist:
+                    tasklist[var] = []
                 deps = data.getVarFlag(var, 'deps', d) or []
+                for p in deps:
+                    if p not in tasklist[var]:
+                        tasklist[var].append(p)
+
                 postdeps = data.getVarFlag(var, 'postdeps', d) or []
-                bb.build.add_task(var, deps, d)
                 for p in postdeps:
-                    pdeps = data.getVarFlag(p, 'deps', d) or []
-                    pdeps.append(var)
-                    data.setVarFlag(p, 'deps', pdeps, d)
-                    bb.build.add_task(p, pdeps, d)
+                    if p not in tasklist:
+                        tasklist[p] = []
+                    if var not in tasklist[p]:
+                        tasklist[p].append(var)
+
+            bb.build.add_tasks(tasklist, d)
 
             # now add the handlers
             if not len(all_handlers) == 0:
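
The BBHandler hunk is the other half of the add_tasks() change: it gathers each task's 'deps' flag and folds every 'postdeps' entry back into the depending task's list before making a single bb.build.add_tasks() call. A toy sketch of that inversion with hypothetical flag data (assuming, as in the old code, that 'postdeps' lists tasks that must run after the task carrying the flag):

    flags = {
        "do_compile": {"deps": ["do_unpack"],  "postdeps": []},
        "do_deploy":  {"deps": [],             "postdeps": ["do_build"]},
        "do_build":   {"deps": ["do_compile"], "postdeps": []},
    }

    tasklist = {}
    for var in flags:
        tasklist.setdefault(var, [])
        for p in flags[var]["deps"]:
            if p not in tasklist[var]:
                tasklist[var].append(p)
        for p in flags[var]["postdeps"]:
            tasklist.setdefault(p, [])
            if var not in tasklist[p]:
                tasklist[p].append(var)

    print(tasklist)   # do_build ends up depending on both do_compile and do_deploy
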
diff --git a/bitbake/lib/bb/runqueue.py b/bitbake/lib/bb/runqueue.py
index c55a58da2b09f34b674e1ef99df0742a3eed1f5d..3dfae219d231d4e413caedcac22be080f3cf8b15 100644
@@ -137,7 +137,7 @@ class RunQueue:
                             dep = taskData.fn_index[depdata]
                             depends.append(taskData.gettask_id(dep, idepend.split(":")[1]))
 
-                def add_recursive_build(depid):
+                def add_recursive_build(depid, depfnid):
                     """
                     Add build depends of depid to depends
                     (if we've not see it before)
@@ -150,26 +150,28 @@ class RunQueue:
                         depdata = taskData.build_targets[depid][0]
                         if depdata is not None:
                             dep = taskData.fn_index[depdata]
+                            idepends = []
                             # Need to avoid creating new tasks here
                             taskid = taskData.gettask_id(dep, taskname, False)
                             if taskid is not None:
                                 depends.append(taskid)
                                 fnid = taskData.tasks_fnid[taskid]
+                                idepends = taskData.tasks_idepends[taskid]
+                                #print "Added %s (%s) due to %s" % (taskid, taskData.fn_index[fnid], taskData.fn_index[depfnid])
                             else:
                                 fnid = taskData.getfn_id(dep)
                             for nextdepid in taskData.depids[fnid]:
                                 if nextdepid not in dep_seen:
-                                    add_recursive_build(nextdepid)
+                                    add_recursive_build(nextdepid, fnid)
                             for nextdepid in taskData.rdepids[fnid]:
                                 if nextdepid not in rdep_seen:
-                                    add_recursive_run(nextdepid)
-                            idepends = taskData.tasks_idepends[depid]
+                                    add_recursive_run(nextdepid, fnid)
                             for idepend in idepends:
                                 nextdepid = int(idepend.split(":")[0])
                                 if nextdepid not in dep_seen:
-                                    add_recursive_build(nextdepid)
+                                    add_recursive_build(nextdepid, fnid)
 
-                def add_recursive_run(rdepid):
+                def add_recursive_run(rdepid, depfnid):
                     """
                     Add runtime depends of rdepid to depends
                     (if we've not see it before)
@@ -182,24 +184,26 @@ class RunQueue:
                         depdata = taskData.run_targets[rdepid][0]
                         if depdata is not None:
                             dep = taskData.fn_index[depdata]
+                            idepends = []
                             # Need to avoid creating new tasks here
                             taskid = taskData.gettask_id(dep, taskname, False)
                             if taskid is not None:
                                 depends.append(taskid)
                                 fnid = taskData.tasks_fnid[taskid]
+                                idepends = taskData.tasks_idepends[taskid]
+                                #print "Added %s (%s) due to %s" % (taskid, taskData.fn_index[fnid], taskData.fn_index[depfnid])
                             else:
                                 fnid = taskData.getfn_id(dep)
                             for nextdepid in taskData.depids[fnid]:
                                 if nextdepid not in dep_seen:
-                                    add_recursive_build(nextdepid)
+                                    add_recursive_build(nextdepid, fnid)
                             for nextdepid in taskData.rdepids[fnid]:
                                 if nextdepid not in rdep_seen:
-                                    add_recursive_run(nextdepid)
-                            idepends = taskData.tasks_idepends[rdepid]
+                                    add_recursive_run(nextdepid, fnid)
                             for idepend in idepends:
                                 nextdepid = int(idepend.split(":")[0])
                                 if nextdepid not in dep_seen:
-                                    add_recursive_build(nextdepid)
+                                    add_recursive_build(nextdepid, fnid)
 
 
                 # Resolve Recursive Runtime Depends
@@ -210,12 +214,12 @@ class RunQueue:
                         rdep_seen = []
                         idep_seen = []
                         for depid in taskData.depids[fnid]:
-                            add_recursive_build(depid)
+                            add_recursive_build(depid, fnid)
                         for rdepid in taskData.rdepids[fnid]:
-                            add_recursive_run(rdepid)
+                            add_recursive_run(rdepid, fnid)
                         for idepend in idepends:
                             depid = int(idepend.split(":")[0])
-                            add_recursive_build(depid)
+                            add_recursive_build(depid, fnid)
 
                 #Prune self references
                 if task in depends:
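
In the runqueue hunks the recursion now reads idepends from the task that was actually found (taskid) rather than from the build-target id, which is the idepends fix named in the ChangeLog, and each helper carries the file id that pulled it in (depfnid), used only by the commented-out debug print. The dep_seen/rdep_seen lists keep the recursion from revisiting ids; a tiny sketch of that visit-once pattern with a hypothetical dependency map:

    # Hypothetical sketch of the visit-once recursion: a shared 'seen' list
    # stops a dependency id from being expanded twice, even across cycles.
    def add_recursive(depid, depends_map, seen, out):
        if depid in seen:
            return
        seen.append(depid)
        out.append(depid)
        for nextdepid in depends_map.get(depid, []):
            add_recursive(nextdepid, depends_map, seen, out)

    collected = []
    add_recursive(1, {1: [2, 3], 2: [3], 3: [1]}, [], collected)
    print(collected)   # [1, 2, 3] - the cycle back to 1 is skipped
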
diff --git a/bitbake/lib/bb/taskdata.py b/bitbake/lib/bb/taskdata.py
index f448b5b6660a5769bc2bb62ff06405e43ff47294..902cc140ef6871766350aa5a59d39f3aea19ca38 100644
@@ -450,10 +450,12 @@ class TaskData:
             self.add_runtime_target(fn, item)
             self.add_tasks(fn, dataCache)
 
-    def fail_fnid(self, fnid):
+    def fail_fnid(self, fnid, missing_list = []):
         """
         Mark a file as failed (unbuildable)
         Remove any references from build and runtime provider lists
+
+        missing_list, A list of missing requirements for this target
         """
         if fnid in self.failed_fnids:
             return
@@ -463,14 +465,14 @@ class TaskData:
             if fnid in self.build_targets[target]:
                 self.build_targets[target].remove(fnid)
                 if len(self.build_targets[target]) == 0:
-                    self.remove_buildtarget(target)
+                    self.remove_buildtarget(target, missing_list)
         for target in self.run_targets:
             if fnid in self.run_targets[target]:
                 self.run_targets[target].remove(fnid)
                 if len(self.run_targets[target]) == 0:
-                    self.remove_runtarget(target)
+                    self.remove_runtarget(target, missing_list)
 
-    def remove_buildtarget(self, targetid):
+    def remove_buildtarget(self, targetid, missing_list = []):
         """
         Mark a build target as failed (unbuildable)
         Trigger removal of any files that have this as a dependency
@@ -479,21 +481,21 @@ class TaskData:
         self.failed_deps.append(targetid)
         dependees = self.get_dependees(targetid)
         for fnid in dependees:
-            self.fail_fnid(fnid)
+            self.fail_fnid(fnid, [self.build_names_index[targetid]]+missing_list)
         if self.abort and targetid in self.external_targets:
-            bb.msg.error(bb.msg.domain.Provider, "No buildable providers available for required build target %s" % self.build_names_index[targetid])
+            bb.msg.error(bb.msg.domain.Provider, "No buildable providers available for required build target %s ('%s')" % (self.build_names_index[targetid], missing_list))
             raise bb.providers.NoProvider
 
-    def remove_runtarget(self, targetid):
+    def remove_runtarget(self, targetid, missing_list = []):
         """
         Mark a run target as failed (unbuildable)
         Trigger removal of any files that have this as a dependency
         """
-        bb.msg.note(1, bb.msg.domain.Provider, "Removing failed runtime build target %s" % self.run_names_index[targetid])
+        bb.msg.note(1, bb.msg.domain.Provider, "Removing failed runtime build target %s  ('%s')" % (self.run_names_index[targetid], missing_list))
         self.failed_rdeps.append(targetid)
         dependees = self.get_rdependees(targetid)
         for fnid in dependees:
-            self.fail_fnid(fnid)
+            self.fail_fnid(fnid, [self.run_names_index[targetid]]+missing_list)
 
     def add_unresolved(self, cfgData, dataCache):
         """
@@ -529,14 +531,26 @@ class TaskData:
         """
         bb.msg.debug(3, bb.msg.domain.TaskData, "build_names:")
         bb.msg.debug(3, bb.msg.domain.TaskData, ", ".join(self.build_names_index))
+
         bb.msg.debug(3, bb.msg.domain.TaskData, "run_names:")
         bb.msg.debug(3, bb.msg.domain.TaskData, ", ".join(self.run_names_index))
+
         bb.msg.debug(3, bb.msg.domain.TaskData, "build_targets:")
-        for target in self.build_targets.keys():
-            bb.msg.debug(3, bb.msg.domain.TaskData, " %s: %s" % (self.build_names_index[target], self.build_targets[target]))
+        for buildid in range(len(self.build_names_index)):
+            target = self.build_names_index[buildid]
+            targets = "None"
+            if buildid in self.build_targets:
+                targets = self.build_targets[buildid]
+            bb.msg.debug(3, bb.msg.domain.TaskData, " (%s)%s: %s" % (buildid, target, targets))
+
         bb.msg.debug(3, bb.msg.domain.TaskData, "run_targets:")
-        for target in self.run_targets.keys():
-            bb.msg.debug(3, bb.msg.domain.TaskData, " %s: %s" % (self.run_names_index[target], self.run_targets[target]))
+        for runid in range(len(self.run_names_index)):
+            target = self.run_names_index[runid]
+            targets = "None"
+            if runid in self.run_targets:
+                targets = self.run_targets[runid]
+            bb.msg.debug(3, bb.msg.domain.TaskData, " (%s)%s: %s" % (runid, target, targets))
+
         bb.msg.debug(3, bb.msg.domain.TaskData, "tasks:")
         for task in range(len(self.tasks_name)):
             bb.msg.debug(3, bb.msg.domain.TaskData, " (%s)%s - %s: %s" % (
@@ -544,6 +558,7 @@ class TaskData:
                 self.fn_index[self.tasks_fnid[task]], 
                 self.tasks_name[task], 
                 self.tasks_tdepends[task]))
+
         bb.msg.debug(3, bb.msg.domain.TaskData, "runtime ids (per fn):")
         for fnid in self.rdepids:
             bb.msg.debug(3, bb.msg.domain.TaskData, " %s %s: %s" % (fnid, self.fn_index[fnid], self.rdepids[fnid]))
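
The taskdata changes thread a missing_list through fail_fnid(), remove_buildtarget() and remove_runtarget(), so the final "No buildable providers available for required build target" error names the whole chain of missing DEPENDS/RDEPENDS rather than just the last target. A sketch of how that list grows as failures cascade (hypothetical target names):

    # Hypothetical cascade: each level prepends the target it could not provide
    # before failing the things that depended on it, mirroring
    # fail_fnid(fnid, [name] + missing_list) above.
    def fail(target, dependees, missing_list=None, depth=0):
        if missing_list is None:
            missing_list = []
        print("  " * depth + "failing %s, missing: %s" % (target, missing_list))
        for dep in dependees.get(target, []):
            fail(dep, dependees, [target] + missing_list, depth + 1)

    # 'libfoo' has no provider; 'foo' depends on it and 'foo-image' on 'foo'.
    fail("libfoo", {"libfoo": ["foo"], "foo": ["foo-image"]})
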