
[scarthgap,2.8,4/8] runqueue: Process unihashes in parallel at init

Message ID 33686c80347491a39cbbd7a926eed5452e9aa168.1717244760.git.steve@sakoman.com
State New
Series [scarthgap,2.8,1/8] runqueue: Add timing warnings around slow loops

Commit Message

Steve Sakoman June 1, 2024, 12:27 p.m. UTC
From: Richard Purdie <richard.purdie@linuxfoundation.org>

Improve the runqueue init code to issue unihash queries in parallel, since
this is faster and more efficient, particularly on slower links with longer
round trip times.
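
The batching pattern is straightforward: on each pass over the remaining
tasks, gather every task whose dependencies are already dealt with, then
resolve the unihashes for that whole set with a single get_unihashes() call
instead of one get_unihash() round trip per task. As a rough illustration
only (fetch_unihashes() and the plain dependency dict below are hypothetical
stand-ins for siggen and runtaskentries, not BitBake code), the shape of the
loop is:

# Minimal sketch of batched unihash resolution; fetch_unihashes() is a
# hypothetical stand-in for a hash server query that resolves many task
# ids in a single round trip.
def resolve_unihashes(depends, fetch_unihashes):
    """depends maps each task id to the set of task ids it depends on."""
    dealtwith = set()
    todeal = set(depends)
    unihash = {}
    while todeal:
        # Collect every task whose dependencies have already been resolved.
        ready = {tid for tid in todeal if not (depends[tid] - dealtwith)}
        if not ready:
            raise RuntimeError("dependency cycle among remaining tasks")
        # One batched query for the whole ready set instead of one network
        # round trip per task, mirroring the get_unihashes() call in the
        # patch below.
        unihash.update(fetch_unihashes(ready))
        dealtwith |= ready
        todeal -= ready
    return unihash

# Example with a stand-in fetcher; a real one would talk to the hash server.
deps = {"a": set(), "b": {"a"}, "c": {"a", "b"}}
print(resolve_unihashes(deps, lambda tids: {t: "unihash-" + t for t in tids}))

Compared with a per-task get_unihash() call inside the loop, the batched
query amortises the server round trip across every ready task, which is
where the gain on high-latency links comes from.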

The call to prepare_task_hash() from cooker is unneeded since that code path
already calls prepare(), which now performs this work, so drop the obsolete
call.

Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
---
 lib/bb/cooker.py   |  1 -
 lib/bb/runqueue.py | 18 ++++++++++--------
 2 files changed, 10 insertions(+), 9 deletions(-)

Patch

diff --git a/lib/bb/cooker.py b/lib/bb/cooker.py
index 939a99997..6318ef4a8 100644
--- a/lib/bb/cooker.py
+++ b/lib/bb/cooker.py
@@ -1459,7 +1459,6 @@  class BBCooker:
 
                     if t in task or getAllTaskSignatures:
                         try:
-                            rq.rqdata.prepare_task_hash(tid)
                             sig.append([pn, t, rq.rqdata.get_task_unihash(tid)])
                         except KeyError:
                             sig.append(self.getTaskSignatures(target, [t])[0])
diff --git a/lib/bb/runqueue.py b/lib/bb/runqueue.py
index 84a6f4172..999868dd7 100644
--- a/lib/bb/runqueue.py
+++ b/lib/bb/runqueue.py
@@ -1280,11 +1280,18 @@  class RunQueueData:
         dealtwith = set()
         todeal = set(self.runtaskentries)
         while todeal:
+            ready = set()
             for tid in todeal.copy():
                 if not (self.runtaskentries[tid].depends - dealtwith):
-                    dealtwith.add(tid)
-                    todeal.remove(tid)
-                    self.prepare_task_hash(tid)
+                    self.runtaskentries[tid].taskhash_deps = bb.parse.siggen.prep_taskhash(tid, self.runtaskentries[tid].depends, self.dataCaches)
+                    # get_taskhash for a given tid *must* be called before get_unihash* below
+                    self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(tid, self.runtaskentries[tid].depends, self.dataCaches)
+                    ready.add(tid)
+            unihashes = bb.parse.siggen.get_unihashes(ready)
+            for tid in ready:
+                dealtwith.add(tid)
+                todeal.remove(tid)
+                self.runtaskentries[tid].unihash = unihashes[tid]
 
             bb.event.check_for_interrupts(self.cooker.data)
 
@@ -1301,11 +1308,6 @@  class RunQueueData:
         #self.dump_data()
         return len(self.runtaskentries)
 
-    def prepare_task_hash(self, tid):
-        self.runtaskentries[tid].taskhash_deps = bb.parse.siggen.prep_taskhash(tid, self.runtaskentries[tid].depends, self.dataCaches)
-        self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(tid, self.runtaskentries[tid].depends, self.dataCaches)
-        self.runtaskentries[tid].unihash = bb.parse.siggen.get_unihash(tid)
-
     def dump_data(self):
         """
         Dump some debug information on the internal data structures