bitbake: Sync with bitbake upstream for various fixes
author Richard Purdie <richard@openedhand.com>
Sun, 6 Jan 2008 16:51:51 +0000 (16:51 +0000)
committer Richard Purdie <richard@openedhand.com>
Sun, 6 Jan 2008 16:51:51 +0000 (16:51 +0000)
git-svn-id: https://svn.o-hand.com/repos/poky/trunk@3411 311d38ba-8fff-0310-9ca6-ca027cbcb966

bitbake/ChangeLog
bitbake/lib/bb/cooker.py
bitbake/lib/bb/runqueue.py
bitbake/lib/bb/shell.py

bitbake/ChangeLog
index 4fddeca86c9226970dc77c175bd4c1d4261c8386..4f3eafe2baf67a279db2bfeda28c4da2cedc0b10 100644
@@ -25,6 +25,16 @@ Changes in BitBake 1.8.x:
          faster at the expense of not creating mirror tarballs.
        - SRCREV handling updates, improvements and fixes from Poky
        - Add bb.utils.lockfile() and bb.utils.unlockfile() from Poky
+       - Add support for task selfstamp and lockfiles flags
+       - Disable task number acceleration since it can allow the tasks to run 
+         out of sequence
+       - Improve runqueue code comments
+       - Add task scheduler abstraction and some example schedulers
+       - Improve circular dependency chain debugging code and user feedback
+       - Don't give a stacktrace for invalid tasks, have a user friendly message (#3431)
+       - Add support for "-e target" (#3432)
+       - Fix shell showdata command (#3259)
+       - Fix shell data updating problems (#1880)
 
 Changes in Bitbake 1.8.8:
        - Rewrite svn fetcher to make adding extra operations easier 
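
As a rough illustration of the locking helpers referenced in the changelog above, bb.utils.lockfile() and bb.utils.unlockfile() are normally used as a pair around a critical section (a minimal sketch; the lock path is illustrative):

    import bb.utils

    lf = bb.utils.lockfile("/tmp/bitbake-example.lock")   # returns once the lock has been acquired
    try:
        pass   # critical section protected by the lock file
    finally:
        bb.utils.unlockfile(lf)                           # release the lock
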
bitbake/lib/bb/cooker.py
index c16709e552ff35d1d894f17e8e2d3f73629888b5..c6597c52f934b175970735c21b9e28ae76d78716 100644
@@ -67,6 +67,10 @@ class BBCooker:
         if not self.configuration.cmd:
             self.configuration.cmd = bb.data.getVar("BB_DEFAULT_TASK", self.configuration.data) or "build"
 
+        bbpkgs = bb.data.getVar('BBPKGS', self.configuration.data, True)
+        if bbpkgs:
+            self.configuration.pkgs_to_build.extend(bbpkgs.split())
+
         #
         # Special updated configuration we use for firing events
         #
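
With this hunk the default target list now comes from BBPKGS at configuration time: a setting such as BBPKGS = "sometarget othertarget" (illustrative names) makes those targets build when bitbake is run with no targets on the command line, and a later hunk below drops the old lookup from the build path. A minimal sketch of the lookup, assuming a parsed configuration datastore d:

    import bb.data

    bbpkgs = bb.data.getVar('BBPKGS', d, True)   # expanded value, or None when unset
    if bbpkgs:
        default_targets = bbpkgs.split()         # space-separated target names
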
@@ -158,29 +162,68 @@ class BBCooker:
                                         prefstr)
 
 
-    def showEnvironment( self ):
-        """Show the outer or per-package environment"""
-        if self.configuration.buildfile:
+    def showEnvironment(self , buildfile = None, pkgs_to_build = []):
+        """
+        Show the outer or per-package environment
+        """
+        fn = None
+        envdata = None
+
+        if 'world' in pkgs_to_build:
+            print "'world' is not a valid target for --environment."
+            sys.exit(1)
+
+        if len(pkgs_to_build) > 1:
+            print "Only one target can be used with the --environment option."
+            sys.exit(1)
+
+        if buildfile:
+            if len(pkgs_to_build) > 0:
+                print "No target should be used with the --environment and --buildfile options."
+                sys.exit(1)
             self.cb = None
             self.bb_cache = bb.cache.init(self)
-            bf = self.matchFile(self.configuration.buildfile)
+            fn = self.matchFile(buildfile)
+        elif len(pkgs_to_build) == 1:
+            self.updateCache()
+
+            localdata = data.createCopy(self.configuration.data)
+            bb.data.update_data(localdata)
+            bb.data.expandKeys(localdata)
+
+            taskdata = bb.taskdata.TaskData(self.configuration.abort)
+
+            try:
+                taskdata.add_provider(localdata, self.status, pkgs_to_build[0])
+                taskdata.add_unresolved(localdata, self.status)
+            except bb.providers.NoProvider:
+                sys.exit(1)
+
+            targetid = taskdata.getbuild_id(pkgs_to_build[0])
+            fnid = taskdata.build_targets[targetid][0]
+            fn = taskdata.fn_index[fnid]
+        else:
+            envdata = self.configuration.data
+
+        if fn:
             try:
-                self.configuration.data = self.bb_cache.loadDataFull(bf, self.configuration.data)
+                envdata = self.bb_cache.loadDataFull(fn, self.configuration.data)
             except IOError, e:
-                bb.msg.fatal(bb.msg.domain.Parsing, "Unable to read %s: %s" % (bf, e))
+                bb.msg.fatal(bb.msg.domain.Parsing, "Unable to read %s: %s" % (fn, e))
             except Exception, e:
                 bb.msg.fatal(bb.msg.domain.Parsing, "%s" % e)
+
         # emit variables and shell functions
         try:
-            data.update_data( self.configuration.data )
-            data.emit_env(sys.__stdout__, self.configuration.data, True)
+            data.update_data( envdata )
+            data.emit_env(sys.__stdout__, envdata, True)
         except Exception, e:
             bb.msg.fatal(bb.msg.domain.Parsing, "%s" % e)
         # emit the metadata which isnt valid shell
-        data.expandKeys( self.configuration.data )     
-        for e in self.configuration.data.keys():
-            if data.getVarFlag( e, 'python', self.configuration.data ):
-                sys.__stdout__.write("\npython %s () {\n%s}\n" % (e, data.getVar(e, self.configuration.data, 1)))
+        data.expandKeys( envdata )     
+        for e in envdata.keys():
+            if data.getVarFlag( e, 'python', envdata ):
+                sys.__stdout__.write("\npython %s () {\n%s}\n" % (e, data.getVar(e, envdata, 1)))
 
     def generateDotGraph( self, pkgs_to_build, ignore_deps ):
         """
@@ -338,8 +381,6 @@ class BBCooker:
         except ImportError, details:
             bb.msg.fatal(bb.msg.domain.Parsing, "Sorry, shell not available (%s)" % details )
         else:
-            bb.data.update_data( self.configuration.data )
-            bb.data.expandKeys( self.configuration.data )
             shell.start( self )
             sys.exit( 0 )
 
@@ -519,7 +560,7 @@ class BBCooker:
         """
 
         if self.configuration.show_environment:
-            self.showEnvironment()
+            self.showEnvironment(self.configuration.buildfile, self.configuration.pkgs_to_build)
             sys.exit( 0 )
 
         self.buildSetVars()
@@ -539,11 +580,7 @@ class BBCooker:
 
         pkgs_to_build = self.configuration.pkgs_to_build
 
-        bbpkgs = bb.data.getVar('BBPKGS', self.configuration.data, 1)
-        if bbpkgs:
-            pkgs_to_build.extend(bbpkgs.split())
-        if len(pkgs_to_build) == 0 and not self.configuration.show_versions \
-                             and not self.configuration.show_environment:
+        if len(pkgs_to_build) == 0 and not self.configuration.show_versions:
                 print "Nothing to do.  Use 'bitbake world' to build everything, or run 'bitbake --help'"
                 print "for usage information."
                 sys.exit(0)
bitbake/lib/bb/runqueue.py
index 2f80dd4c880c4e38efc93358ff07542577c17c2e..68ef3a722f3e0e83359ae6f014e3ee874c111000 100644
@@ -51,6 +51,88 @@ class RunQueueStats:
     def taskSkipped(self):
         self.skipped = self.skipped + 1
 
+class RunQueueScheduler:
+    """
+    Control the order tasks are scheduled in.
+    """
+    def __init__(self, runqueue):
+        """
+        The default scheduler just returns the first buildable task (the 
+        priority map is sorted by task numer)
+        """
+        self.rq = runqueue
+        numTasks = len(self.rq.runq_fnid)
+
+        self.prio_map = []
+        self.prio_map.extend(range(numTasks))
+
+    def next(self):
+        """
+        Return the id of the first task we find that is buildable
+        """
+        for task1 in range(len(self.rq.runq_fnid)):
+            task = self.prio_map[task1]
+            if self.rq.runq_running[task] == 1:
+                continue
+            if self.rq.runq_buildable[task] == 1:
+                return task
+
+class RunQueueSchedulerSpeed(RunQueueScheduler):
+    """
+    A scheduler optimised for speed. The priority map is sorted by task weight,
+    heavier weighted tasks (tasks needed by the most other tasks) are run first.
+    """
+    def __init__(self, runqueue):
+        """
+        The priority map is sorted by task weight.
+        """
+        from copy import deepcopy
+
+        self.rq = runqueue
+
+        sortweight = deepcopy(self.rq.runq_weight)
+        sortweight.sort()
+        copyweight = deepcopy(self.rq.runq_weight)
+        self.prio_map = []
+
+        for weight in sortweight:
+            idx = copyweight.index(weight)
+            self.prio_map.append(idx)
+            copyweight[idx] = -1
+
+        self.prio_map.reverse()
+
+class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
+    """
+    A scheduler optimised to complete .bb files as quickly as possible. The
+    priority map is sorted by task weight, but then reordered so once a given
+    .bb file starts to build, it is completed as quickly as possible. This works
+    well where disk space is at a premium and classes like OE's rm_work are in 
+    force.
+    """
+    def __init__(self, runqueue):
+        RunQueueSchedulerSpeed.__init__(self, runqueue)
+        from copy import deepcopy
+
+        #FIXME - whilst this groups all fnids together it does not reorder the
+        #fnid groups optimally.
+        basemap = deepcopy(self.prio_map)
+        self.prio_map = []
+        while (len(basemap) > 0):
+            entry = basemap.pop(0)
+            self.prio_map.append(entry)
+            fnid = self.rq.runq_fnid[entry]
+            todel = []
+            for entry in basemap:
+                entry_fnid = self.rq.runq_fnid[entry]
+                if entry_fnid == fnid:
+                    todel.append(basemap.index(entry))
+                    self.prio_map.append(entry)
+            todel.reverse()
+            for idx in todel:
+                del basemap[idx]
+
 class RunQueue:
     """
     BitBake Run Queue implementation
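
The classes above form the new scheduler abstraction: a scheduler only needs to fill in prio_map (and may override next()), and the run queue asks it which buildable task to start next. A minimal sketch of a further scheduler built on the same interface (illustrative only, not part of this commit):

    class RunQueueSchedulerRandom(RunQueueScheduler):
        """
        Illustrative scheduler: run buildable tasks in a random order.
        """
        def __init__(self, runqueue):
            import random
            self.rq = runqueue
            self.prio_map = list(range(len(self.rq.runq_fnid)))
            random.shuffle(self.prio_map)   # next() from the base class walks this map
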
@@ -71,14 +153,158 @@ class RunQueue:
         self.runq_task = []
         self.runq_depends = []
         self.runq_revdeps = []
-        self.runq_weight = []
-        self.prio_map = []
 
     def get_user_idstring(self, task):
         fn = self.taskData.fn_index[self.runq_fnid[task]]
         taskname = self.runq_task[task]
         return "%s, %s" % (fn, taskname)
 
+    def circular_depchains_handler(self, tasks):
+        """
+        Some tasks aren't buildable, likely due to circular dependency issues.
+        Identify the circular dependencies and print them in a user readable format.
+        """
+        from copy import deepcopy
+
+        valid_chains = []
+        explored_deps = {}
+        msgs = []
+
+        def chain_reorder(chain):
+            """
+            Reorder a dependency chain so the lowest task id is first
+            """
+            lowest = 0
+            new_chain = []
+            for entry in range(len(chain)):
+                if chain[entry] < chain[lowest]:
+                    lowest = entry
+            new_chain.extend(chain[lowest:])
+            new_chain.extend(chain[:lowest])
+            return new_chain
+
+        def chain_compare_equal(chain1, chain2):
+            """
+            Compare two dependency chains and see if they're the same
+            """
+            if len(chain1) != len(chain2):
+                return False
+            for index in range(len(chain1)):
+                if chain1[index] != chain2[index]:
+                    return False
+            return True
+            
+        def chain_array_contains(chain, chain_array):
+            """
+            Return True if chain_array contains chain
+            """
+            for ch in chain_array:
+                if chain_compare_equal(ch, chain):
+                    return True
+            return False
+
+        def find_chains(taskid, prev_chain):
+            prev_chain.append(taskid)
+            total_deps = []
+            total_deps.extend(self.runq_revdeps[taskid])
+            for revdep in self.runq_revdeps[taskid]:
+                if revdep in prev_chain:
+                    idx = prev_chain.index(revdep)
+                    # To prevent duplicates, reorder the chain to start with the lowest taskid
+                    # and search through an array of those we've already printed
+                    chain = prev_chain[idx:]
+                    new_chain = chain_reorder(chain)
+                    if not chain_array_contains(new_chain, valid_chains):
+                        valid_chains.append(new_chain)
+                        msgs.append("Dependency loop #%d found:\n" % len(valid_chains))
+                        for dep in new_chain:
+                            msgs.append("  Task %s (%s) (depends: %s)\n" % (dep, self.get_user_idstring(dep), self.runq_depends[dep]))
+                        msgs.append("\n")
+                    if len(valid_chains) > 10:
+                        msgs.append("Aborted dependency loops search after 10 matches.\n")
+                        return msgs
+                    continue
+                scan = False
+                if revdep not in explored_deps:
+                    scan = True
+                elif revdep in explored_deps[revdep]:
+                    scan = True
+                else:
+                    for dep in prev_chain:
+                        if dep in explored_deps[revdep]:
+                            scan = True
+                if scan:
+                    find_chains(revdep, deepcopy(prev_chain))
+                for dep in explored_deps[revdep]:
+                    if dep not in total_deps:
+                        total_deps.append(dep)
+
+            explored_deps[taskid] = total_deps
+
+        for task in tasks:
+            find_chains(task, [])
+
+        return msgs
+
+    def calculate_task_weights(self, endpoints):
+        """
+        Calculate a number representing the "weight" of each task. Heavier weighted tasks
+        have more tasks depending on them and hence should be executed sooner for maximum speed.
+
+        This function also sanity checks the task list, finding tasks that are not
+        possible to execute due to circular dependencies.
+        """
+
+        numTasks = len(self.runq_fnid)
+        weight = []
+        deps_left = []
+        task_done = []
+
+        for listid in range(numTasks):
+            task_done.append(False)
+            weight.append(0)
+            deps_left.append(len(self.runq_revdeps[listid]))
+
+        for listid in endpoints:
+            weight[listid] = 1
+            task_done[listid] = True
+
+        while 1:
+            next_points = []
+            for listid in endpoints:
+                for revdep in self.runq_depends[listid]:
+                    weight[revdep] = weight[revdep] + weight[listid]
+                    deps_left[revdep] = deps_left[revdep] - 1
+                    if deps_left[revdep] == 0:
+                        next_points.append(revdep)
+                        task_done[revdep] = True
+            endpoints = next_points
+            if len(next_points) == 0:
+                break      
+
+        # Circular dependency sanity check
+        problem_tasks = []
+        for task in range(numTasks):
+            if task_done[task] is False or deps_left[task] != 0:
+                problem_tasks.append(task)
+                bb.msg.debug(2, bb.msg.domain.RunQueue, "Task %s (%s) is not buildable\n" % (task, self.get_user_idstring(task)))
+                bb.msg.debug(2, bb.msg.domain.RunQueue, "(Complete marker was %s and the remaining dependency count was %s)\n\n" % (task_done[task], deps_left[task]))
+
+        if problem_tasks:
+            message = "Unbuildable tasks were found.\n"
+            message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n"
+            message = message + "Identifying dependency loops (this may take a short while)...\n"
+            bb.msg.error(bb.msg.domain.RunQueue, message)
+
+            msgs = self.circular_depchains_handler(problem_tasks)
+
+            message = "\n"
+            for msg in msgs:
+                message = message + msg
+            bb.msg.fatal(bb.msg.domain.RunQueue, message)
+
+        return weight
+
     def prepare_runqueue(self):
         """
         Turn a set of taskData into a RunQueue and compute data needed 
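
To see what the new calculate_task_weights() pass computes, here is a small standalone sketch of the same propagation (toy task names, not BitBake API): endpoints start at weight 1 and every task accumulates the weights of the tasks that depend on it, so widely needed tasks come out heaviest and are started first by the speed scheduler.

    # depends[t] lists the tasks t depends on; nothing depends on "image",
    # so it is the only endpoint, while both libraries need "base".
    depends = {"image": ["liba", "libb"], "liba": ["base"], "libb": ["base"], "base": []}
    revdeps = dict([(t, []) for t in depends])
    for t in depends:
        for d in depends[t]:
            revdeps[d].append(t)
    weight = dict([(t, 0) for t in depends])
    deps_left = dict([(t, len(revdeps[t])) for t in depends])
    endpoints = [t for t in depends if not revdeps[t]]          # ["image"]
    for t in endpoints:
        weight[t] = 1
    while endpoints:
        next_points = []
        for t in endpoints:
            for d in depends[t]:
                weight[d] += weight[t]
                deps_left[d] -= 1
                if deps_left[d] == 0:
                    next_points.append(d)
        endpoints = next_points
    # weight is now {"image": 1, "liba": 1, "libb": 1, "base": 2}
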
@@ -86,9 +312,7 @@ class RunQueue:
         """
 
         depends = []
-        runq_weight1 = []
         runq_build = []
-        runq_done = []
 
         taskData = self.taskData
 
@@ -98,6 +322,17 @@ class RunQueue:
 
         bb.msg.note(1, bb.msg.domain.RunQueue, "Preparing runqueue")
 
+        # Step A - Work out a list of tasks to run
+        #
+        # Taskdata gives us a list of possible providers for every target
+        # ordered by priority (build_targets, run_targets). It also gives
+        # information on each of those providers.
+        #
+        # To create the actual list of tasks to execute we fix the list of 
+        # providers and then resolve the dependencies into task IDs. This 
+        # process is repeated for each type of dependency (tdepends, deptask, 
+        # rdeptask, recrdeptask, idepends).
+
         for task in range(len(taskData.tasks_name)):
             fnid = taskData.tasks_fnid[task]
             fn = taskData.fn_index[fnid]
@@ -105,9 +340,15 @@ class RunQueue:
 
             if fnid not in taskData.failed_fnids:
 
+                # Resolve task internal dependencies 
+                #
+                # e.g. addtask before X after Y
                 depends = taskData.tasks_tdepends[task]
 
-                # Resolve Depends
+                # Resolve 'deptask' dependencies 
+                #
+                # e.g. do_sometask[deptask] = "do_someothertask"
+                # (makes sure sometask runs after someothertask of all DEPENDS)
                 if 'deptask' in task_deps and taskData.tasks_name[task] in task_deps['deptask']:
                     tasknames = task_deps['deptask'][taskData.tasks_name[task]].split()
                     for depid in taskData.depids[fnid]:
@@ -119,7 +360,10 @@ class RunQueue:
                                 for taskname in tasknames:
                                     depends.append(taskData.gettask_id(dep, taskname))
 
-                # Resolve Runtime Depends
+                # Resolve 'rdeptask' dependencies 
+                #
+                # e.g. do_sometask[rdeptask] = "do_someothertask"
+                # (makes sure sometask runs after someothertask of all RDEPENDS)
                 if 'rdeptask' in task_deps and taskData.tasks_name[task] in task_deps['rdeptask']:
                     taskname = task_deps['rdeptask'][taskData.tasks_name[task]]
                     for depid in taskData.rdepids[fnid]:
@@ -129,6 +373,10 @@ class RunQueue:
                                 dep = taskData.fn_index[depdata]
                                 depends.append(taskData.gettask_id(dep, taskname))
 
+                # Resolve inter-task dependencies 
+                #
+                # e.g. do_sometask[depends] = "targetname:do_someothertask"
+                # (makes sure sometask runs after targetname's someothertask)
                 idepends = taskData.tasks_idepends[task]
                 for idepend in idepends:
                     depid = int(idepend.split(":")[0])
@@ -207,9 +455,10 @@ class RunQueue:
                                 if nextdepid not in dep_seen:
                                     add_recursive_build(nextdepid, fnid)
 
-
-                # Resolve Recursive Runtime Depends
-                # Also includes all thier build depends, intertask depends and runtime depends
+                # Resolve recursive 'recrdeptask' dependencies 
+                #
+                # e.g. do_sometask[recrdeptask] = "do_someothertask"
+                # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
                 if 'recrdeptask' in task_deps and taskData.tasks_name[task] in task_deps['recrdeptask']:
                     for taskname in task_deps['recrdeptask'][taskData.tasks_name[task]].split():
                         dep_seen = []
@@ -223,7 +472,7 @@ class RunQueue:
                             depid = int(idepend.split(":")[0])
                             add_recursive_build(depid, fnid)
 
-                #Prune self references
+                # Remove all self references
                 if task in depends:
                     newdep = []
                     bb.msg.debug(2, bb.msg.domain.RunQueue, "Task %s (%s %s) contains self reference! %s" % (task, taskData.fn_index[taskData.tasks_fnid[task]], taskData.tasks_name[task], depends))
@@ -237,11 +486,14 @@ class RunQueue:
             self.runq_task.append(taskData.tasks_name[task])
             self.runq_depends.append(Set(depends))
             self.runq_revdeps.append(Set())
-            self.runq_weight.append(0)
 
-            runq_weight1.append(0)
             runq_build.append(0)
-            runq_done.append(0)
+
+        # Step B - Mark all active tasks
+        #
+        # Start with the tasks we were asked to run and mark all dependencies
+        # as active too. If the task is to be 'forced', clear its stamp. Once
+        # all active tasks are marked, prune the ones we don't need.
 
         bb.msg.note(2, bb.msg.domain.RunQueue, "Marking Active Tasks")
 
@@ -280,11 +532,17 @@ class RunQueue:
             if fnid in taskData.failed_fnids:
                 continue
 
+            if target[1] not in taskData.tasks_lookup[fnid]:
+                bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s does not exist for target %s" % (target[1], target[0]))
+
             listid = taskData.tasks_lookup[fnid][target[1]]
 
             mark_active(listid, 1)
 
-        # Prune inactive tasks
+        # Step C - Prune all inactive tasks
+        #
+        # Once all active tasks are marked, prune the ones we don't need.
+
         maps = []
         delcount = 0
         for listid in range(len(self.runq_fnid)):
@@ -294,14 +552,16 @@ class RunQueue:
                 del self.runq_fnid[listid-delcount]
                 del self.runq_task[listid-delcount]
                 del self.runq_depends[listid-delcount]
-                del self.runq_weight[listid-delcount]
-                del runq_weight1[listid-delcount]
                 del runq_build[listid-delcount]
-                del runq_done[listid-delcount]
                 del self.runq_revdeps[listid-delcount]
                 delcount = delcount + 1
                 maps.append(-1)
 
+        #
+        # Step D - Sanity checks and computation
+        #
+
+        # Check to make sure we still have tasks to run
         if len(self.runq_fnid) == 0:
             if not taskData.abort:
                 bb.msg.note(1, bb.msg.domain.RunQueue, "All possible tasks have been run but build incomplete (--continue mode). See errors above for incomplete tasks.")
@@ -310,6 +570,8 @@ class RunQueue:
 
         bb.msg.note(2, bb.msg.domain.RunQueue, "Pruned %s inactive tasks, %s left" % (delcount, len(self.runq_fnid)))
 
+        # Remap the dependencies to account for the deleted tasks
+        # Check we didn't delete a task we depend on
         for listid in range(len(self.runq_fnid)):
             newdeps = []
             origdeps = self.runq_depends[listid]
@@ -321,62 +583,37 @@ class RunQueue:
 
         bb.msg.note(2, bb.msg.domain.RunQueue, "Assign Weightings")
 
+        # Generate a list of reverse dependencies to ease future calculations
         for listid in range(len(self.runq_fnid)):
             for dep in self.runq_depends[listid]:
                 self.runq_revdeps[dep].add(listid)
 
+        # Identify tasks at the end of dependency chains
+        # Error on circular dependency loops (length two)
         endpoints = []
         for listid in range(len(self.runq_fnid)):
             revdeps = self.runq_revdeps[listid]
             if len(revdeps) == 0:
-                runq_done[listid] = 1
-                self.runq_weight[listid] = 1
                 endpoints.append(listid)
             for dep in revdeps:
                 if dep in self.runq_depends[listid]:
                     #self.dump_data(taskData)
                     bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) has circular dependency on %s (%s)" % (taskData.fn_index[self.runq_fnid[dep]], self.runq_task[dep] , taskData.fn_index[self.runq_fnid[listid]], self.runq_task[listid]))
-            runq_weight1[listid] = len(revdeps)
 
         bb.msg.note(2, bb.msg.domain.RunQueue, "Compute totals (have %s endpoint(s))" % len(endpoints))
 
-        while 1:
-            next_points = []
-            for listid in endpoints:
-                for revdep in self.runq_depends[listid]:
-                    self.runq_weight[revdep] = self.runq_weight[revdep] + self.runq_weight[listid]
-                    runq_weight1[revdep] = runq_weight1[revdep] - 1
-                    if runq_weight1[revdep] == 0:
-                        next_points.append(revdep)
-                        runq_done[revdep] = 1
-            endpoints = next_points
-            if len(next_points) == 0:
-                break           
 
-        # Sanity Checks
-        for task in range(len(self.runq_fnid)):
-            if runq_done[task] == 0:
-                seen = []
-                deps_seen = []
-                def print_chain(taskid, finish):
-                    seen.append(taskid)
-                    for revdep in self.runq_revdeps[taskid]:
-                        if runq_done[revdep] == 0 and revdep not in seen and not finish:
-                            bb.msg.error(bb.msg.domain.RunQueue, "Task %s (%s) (depends: %s)" % (revdep, self.get_user_idstring(revdep), self.runq_depends[revdep]))
-                            if revdep in deps_seen:
-                                bb.msg.error(bb.msg.domain.RunQueue, "Chain ends at Task %s (%s)" % (revdep, self.get_user_idstring(revdep)))
-                                finish = True
-                                return
-                            for dep in self.runq_depends[revdep]:
-                                deps_seen.append(dep)
-                            print_chain(revdep, finish)
-                print_chain(task, False)
-                bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) not processed!\nThis is probably a circular dependency (the chain might be printed above)." % (task, self.get_user_idstring(task)))
-            if runq_weight1[task] != 0:
-                bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) count not zero!" % (task, self.get_user_idstring(task)))
-
-
-        # Check for multiple tasks building the same provider
+        # Calculate task weights 
+        # Check for longer circular dependencies
+        self.runq_weight = self.calculate_task_weights(endpoints)
+
+        # Decide what order to execute the tasks in, pick a scheduler
+        # FIXME - Allow user selection
+        #self.sched = RunQueueScheduler(self)
+        self.sched = RunQueueSchedulerSpeed(self)
+        #self.sched = RunQueueSchedulerCompletion(self)
+
+        # Sanity Check - Check for multiple tasks building the same provider
         prov_list = {}
         seen_fn = []
         for task in range(len(self.runq_fnid)):
@@ -397,21 +634,6 @@ class RunQueue:
         #if error:
         #    bb.msg.fatal(bb.msg.domain.RunQueue, "Corrupted metadata configuration detected, aborting...")
 
-
-        # Make a weight sorted map
-        from copy import deepcopy
-
-        sortweight = deepcopy(self.runq_weight)
-        sortweight.sort()
-        copyweight = deepcopy(self.runq_weight)
-        self.prio_map = []
-
-        for weight in sortweight:
-            idx = copyweight.index(weight)
-            self.prio_map.append(idx)
-            copyweight[idx] = -1
-        self.prio_map.reverse()
-
         #self.dump_data(taskData)
 
     def execute_runqueue(self):
@@ -483,18 +705,6 @@ class RunQueue:
                 taskname = self.runq_task[revdep]
                 bb.msg.debug(1, bb.msg.domain.RunQueue, "Marking task %s (%s, %s) as buildable" % (revdep, fn, taskname))
 
-    def get_next_task(self):
-        """
-        Return the id of the highest priority task that is buildable
-        """
-        for task1 in range(len(self.runq_fnid)):
-            task = self.prio_map[task1]
-            if self.runq_running[task] == 1:
-                continue
-            if self.runq_buildable[task] == 1:
-                return task
-        return None
-
     def execute_runqueue_internal(self):
         """
         Run the tasks in a queue prepared by prepare_runqueue
@@ -511,20 +721,21 @@ class RunQueue:
         def sigint_handler(signum, frame):
             raise KeyboardInterrupt
 
+        # RP - this code allows tasks to run out of the correct order - disabled, FIXME
         # Find any tasks with current stamps and remove them from the queue
-        for task1 in range(len(self.runq_fnid)):
-            task = self.prio_map[task1]
-            fn = self.taskData.fn_index[self.runq_fnid[task]]
-            taskname = self.runq_task[task]
-            if bb.build.stamp_is_current(taskname, self.dataCache, fn):
-                bb.msg.debug(2, bb.msg.domain.RunQueue, "Stamp current task %s (%s)" % (task, self.get_user_idstring(task)))
-                self.runq_running[task] = 1
-                self.task_complete(task)
-                self.stats.taskCompleted()
-                self.stats.taskSkipped()
+        #for task1 in range(len(self.runq_fnid)):
+        #    task = self.prio_map[task1]
+        #    fn = self.taskData.fn_index[self.runq_fnid[task]]
+        #    taskname = self.runq_task[task]
+        #    if bb.build.stamp_is_current(taskname, self.dataCache, fn):
+        #        bb.msg.debug(2, bb.msg.domain.RunQueue, "Stamp current task %s (%s)" % (task, self.get_user_idstring(task)))
+        #        self.runq_running[task] = 1
+        #        self.task_complete(task)
+        #        self.stats.taskCompleted()
+        #        self.stats.taskSkipped()
 
         while True:
-            task = self.get_next_task()
+            task = self.sched.next()
             if task is not None:
                 fn = self.taskData.fn_index[self.runq_fnid[task]]
 
bitbake/lib/bb/shell.py
index fc213c3f4a6b77dde4d4af77c7c252762db189cf..745091fb7df8053f569cb6bb0e4df606d923befd 100644
@@ -68,7 +68,6 @@ leave_mainloop = False
 last_exception = None
 cooker = None
 parsed = False
-initdata = None
 debug = os.environ.get( "BBSHELL_DEBUG", "" )
 
 ##########################################################################
@@ -156,11 +155,14 @@ class BitBakeShellCommands:
         cooker.configuration.cmd = cmd
 
         td = taskdata.TaskData(cooker.configuration.abort)
+        localdata = data.createCopy(cooker.configuration.data)
+        data.update_data(localdata)
+        data.expandKeys(localdata)
 
         try:
             tasks = []
             for name in names:
-                td.add_provider(cooker.configuration.data, cooker.status, name)
+                td.add_provider(localdata, cooker.status, name)
                 providers = td.get_provider(name)
 
                 if len(providers) == 0:
@@ -168,9 +170,9 @@ class BitBakeShellCommands:
 
                 tasks.append([name, "do_%s" % cooker.configuration.cmd])
 
-            td.add_unresolved(cooker.configuration.data, cooker.status)
+            td.add_unresolved(localdata, cooker.status)
             
-            rq = runqueue.RunQueue(cooker, cooker.configuration.data, cooker.status, td, tasks)
+            rq = runqueue.RunQueue(cooker, localdata, cooker.status, td, tasks)
             rq.prepare_runqueue()
             rq.execute_runqueue()
 
@@ -217,8 +219,8 @@ class BitBakeShellCommands:
     edit.usage = "<providee>"
 
     def environment( self, params ):
-        """Dump out the outer BitBake environment (see bbread)"""
-        data.emit_env(sys.__stdout__, cooker.configuration.data, True)
+        """Dump out the outer BitBake environment"""
+        cooker.showEnvironment()
 
     def exit_( self, params ):
         """Leave the BitBake Shell"""
@@ -241,11 +243,10 @@ class BitBakeShellCommands:
         oldcmd = cooker.configuration.cmd
         cooker.configuration.cmd = cmd
 
-        thisdata = copy.deepcopy( initdata )
-        # Caution: parse.handle modifies thisdata, hence it would
-        # lead to pollution cooker.configuration.data, which is
-        # why we use it on a safe copy we obtained from cooker right after
-        # parsing the initial *.conf files
+        thisdata = data.createCopy(cooker.configuration.data)
+        data.update_data(thisdata)
+        data.expandKeys(thisdata)
+
         try:
             bbfile_data = parse.handle( bf, thisdata )
         except parse.ParseError:
@@ -393,6 +394,11 @@ SRC_URI = ""
             os.system( "%s %s/%s" % ( os.environ.get( "EDITOR" ), fulldirname, filename ) )
     new.usage = "<directory> <filename>"
 
+    def package( self, params ):
+        """Execute 'package' on a providee"""
+        self.build( params, "package" )
+    package.usage = "<providee>"
+
     def pasteBin( self, params ):
         """Send a command + output buffer to the pastebin at http://rafb.net/paste"""
         index = params[0]
@@ -501,8 +507,8 @@ SRC_URI = ""
         interpreter.interact( "SHELL: Expert Mode - BitBake Python %s\nType 'help' for more information, press CTRL-D to switch back to BBSHELL." % sys.version )
 
     def showdata( self, params ):
-        """Execute 'showdata' on a providee"""
-        self.build( params, "showdata" )
+        """Show the parsed metadata for a given providee"""
+        cooker.showEnvironment(None, params)
     showdata.usage = "<providee>"
 
     def setVar( self, params ):
@@ -732,10 +738,6 @@ class BitBakeShell:
 
         print __credits__
 
-        # save initial cooker configuration (will be reused in file*** commands)
-        global initdata
-        initdata = copy.deepcopy( cooker.configuration.data )
-
     def cleanup( self ):
         """Write readline history and clean up resources"""
         debugOut( "writing command history" )