Hi Muharem, Great branch this; I'm really impressed with the whole thing. Couple of minor nitpicks but otherwise this is r=me. > === modified file 'lib/lp/soyuz/model/buildqueue.py' > --- lib/lp/soyuz/model/buildqueue.py 2010-01-04 14:30:52 +0000 > +++ lib/lp/soyuz/model/buildqueue.py 2010-01-06 14:11:14 +0000 > @@ -212,6 +212,83 @@ > free_builders = result_set.get_one()[0] > return free_builders > > + def _estimateTimeToNextBuilder( > + self, head_job_processor, head_job_virtualized): > + """Estimate time until next builder becomes available. > + > + For the purpose of estimating the dispatch time of the job of interest > + (JOI) we need to know how long it will take until the job at the head > + of JOI's queue is dispatched. > + > + There are two cases to consider here: the head job is > + > + - processor dependent: only builders with the matching > + processor/virtualization combination should be considered. > + - *not* processor dependent: all builders should be considered. > + > + :param head_job_processor: The processor required by the job at the > + head of the queue. > + :param head_job_virtualized: The virtualization setting required by > + the job at the head of the queue. > + :return: The estimated number of seconds untils a builder capable of > + running the head job becomes available or None if no such builder > + exists. > + """ > + store = getUtility(IStoreSelector).get(MAIN_STORE, DEFAULT_FLAVOR) > + > + # First check whether we have free builders. > + free_builders = self._freeBuildersCount( > + head_job_processor, head_job_virtualized) > + > + if free_builders > 0: > + # We have free builders for the given processor/virtualization > + # combination -> zero delay > + return 0 > + > + extra_clauses = '' > + if head_job_processor is not None: > + # Only look at builders with specific processor types. > + extra_clauses += """ > + AND Builder.processor = %s > + AND Builder.virtualized = %s > + """ % sqlvalues(head_job_processor, head_job_virtualized) > + > + params = sqlvalues(JobStatus.RUNNING) + (extra_clauses,) > + > + delay_query = """ > + SELECT MIN( > + CASE WHEN > + EXTRACT(EPOCH FROM > + (BuildQueue.estimated_duration - > + (((now() AT TIME ZONE 'UTC') - Job.date_started)))) >= 0 > + THEN > + EXTRACT(EPOCH FROM > + (BuildQueue.estimated_duration - > + (((now() AT TIME ZONE 'UTC') - Job.date_started)))) > + ELSE > + -- Assume that jobs that have overdrawn their estimated > + -- duration time budget will complete within 2 minutes. > + -- This is a wild guess but has worked well so far. As discussed on IRC, this comment should indicate that nothing will actually break if this wild guess is wrong. 
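Something along these lines would do (wording is only a suggestion, tweak as you see fit):

    -- Assume that jobs that have overdrawn their estimated
    -- duration time budget will complete within 2 minutes.
    -- This is a wild guess but has worked well so far. Please
    -- note that nothing breaks if the guess is off; the
    -- resulting dispatch time estimate is merely a bit less
    -- accurate.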
> + 120 > + END) > + FROM > + BuildQueue, Job, Builder > + WHERE > + BuildQueue.job = Job.id > + AND BuildQueue.builder = Builder.id > + AND Builder.manual = False > + AND Builder.builderok = True > + AND Job.status = %s > + %s > + """ % params > + > + result_set = store.execute(delay_query) > + head_job_delay = result_set.get_one()[0] > + if head_job_delay is None: > + return None > + else: > + return int(head_job_delay) > + > > class BuildQueueSet(object): > """Utility to deal with BuildQueue content class.""" > > === modified file 'lib/lp/soyuz/tests/test_buildqueue.py' > --- lib/lp/soyuz/tests/test_buildqueue.py 2010-01-04 10:58:09 +0000 > +++ lib/lp/soyuz/tests/test_buildqueue.py 2010-01-06 14:11:14 +0000 > @@ -4,7 +4,8 @@ > > """Test BuildQueue features.""" > > -from datetime import timedelta > +from datetime import datetime, timedelta > +from pytz import utc > > from zope.component import getUtility > > @@ -33,8 +34,6 @@ > > def nth_builder(test, build, n): > """Find nth builder that can execute the given build.""" > - def builder_key(build): > - return (build.processor.id,build.is_virtualized) > builder = None > builders = test.builders.get(builder_key(build), []) > try: > @@ -44,9 +43,9 @@ > return builder > > > -def assign_to_builder(test, job_name, builder_number): > +def assign_to_builder(test, job_name, builder_number, processor='386'): > """Simulate assigning a build to a builder.""" > - build, bq = find_job(test, job_name) > + build, bq = find_job(test, job_name, processor) > builder = nth_builder(test, build, builder_number) > bq.markAsBuilding(builder) > > @@ -61,6 +60,43 @@ > bq.estimated_duration, bq.lastscore) > > > +def builder_key(job): > + """Access key for builders capable of running the given job.""" > + return (job.processor.id, job.is_virtualized) > + > + > +def check_mintime_to_builder( > + test, bq, head_job_processor, head_job_virtualized, min_time): > + """Test the estimated time until a builder becomes available.""" > + delay = bq._estimateTimeToNextBuilder( > + head_job_processor, head_job_virtualized) > + if min_time is not None: > + test.assertTrue( > + almost_equal(delay, min_time), > + "Wrong min time to next available builder (%s != %s)" > + % (delay, min_time)) > + else: > + test.assertTrue(delay is None, "No delay to next builder available") > + > + > +def almost_equal(a, b, deviation=1): > + """Compare the values tolerating the given deviation. > + > + This used to spurious failures in time based tests. > + """ > + if abs(a - b) <= deviation: > + return True > + else: > + return False > + > + > +def set_remaining_time_for_running_job(bq, remainder): > + """Set remaining running time for job.""" > + offset = bq.estimated_duration.seconds - remainder > + bq.setDateStarted( > + datetime.utcnow().replace(tzinfo=utc) - timedelta(seconds=offset)) > + > + > class TestBuildQueueBase(TestCaseWithFactory): > """Setup the test publisher and some builders.""" > > @@ -84,45 +120,56 @@ > > # Next make seven 'hppa' builders. 
> processor_fam = ProcessorFamilySet().getByName('hppa') > - proc = processor_fam.processors[0] > - self.h1 = self.factory.makeBuilder(name='hppa-v-1', processor=proc) > - self.h2 = self.factory.makeBuilder(name='hppa-v-2', processor=proc) > - self.h3 = self.factory.makeBuilder(name='hppa-v-3', processor=proc) > - self.h4 = self.factory.makeBuilder(name='hppa-v-4', processor=proc) > + hppa_proc = processor_fam.processors[0] > + self.h1 = self.factory.makeBuilder( > + name='hppa-v-1', processor=hppa_proc) > + self.h2 = self.factory.makeBuilder( > + name='hppa-v-2', processor=hppa_proc) > + self.h3 = self.factory.makeBuilder( > + name='hppa-v-3', processor=hppa_proc) > + self.h4 = self.factory.makeBuilder( > + name='hppa-v-4', processor=hppa_proc) > self.h5 = self.factory.makeBuilder( > - name='hppa-n-5', processor=proc, virtualized=False) > + name='hppa-n-5', processor=hppa_proc, virtualized=False) > self.h6 = self.factory.makeBuilder( > - name='hppa-n-6', processor=proc, virtualized=False) > + name='hppa-n-6', processor=hppa_proc, virtualized=False) > self.h7 = self.factory.makeBuilder( > - name='hppa-n-7', processor=proc, virtualized=False) > + name='hppa-n-7', processor=hppa_proc, virtualized=False) > > # Finally make five 'amd64' builders. > processor_fam = ProcessorFamilySet().getByName('amd64') > - proc = processor_fam.processors[0] > - self.a1 = self.factory.makeBuilder(name='amd64-v-1', processor=proc) > - self.a2 = self.factory.makeBuilder(name='amd64-v-2', processor=proc) > - self.a3 = self.factory.makeBuilder(name='amd64-v-3', processor=proc) > + amd_proc = processor_fam.processors[0] > + self.a1 = self.factory.makeBuilder( > + name='amd64-v-1', processor=amd_proc) > + self.a2 = self.factory.makeBuilder( > + name='amd64-v-2', processor=amd_proc) > + self.a3 = self.factory.makeBuilder( > + name='amd64-v-3', processor=amd_proc) > self.a4 = self.factory.makeBuilder( > - name='amd64-n-4', processor=proc, virtualized=False) > + name='amd64-n-4', processor=amd_proc, virtualized=False) > self.a5 = self.factory.makeBuilder( > - name='amd64-n-5', processor=proc, virtualized=False) > + name='amd64-n-5', processor=amd_proc, virtualized=False) > > self.builders = dict() > + processor_fam = ProcessorFamilySet().getByName('x86') > + x86_proc = processor_fam.processors[0] > # x86 native > - self.builders[(1,False)] = [self.i6, self.i7, self.i8, self.i9] > + self.builders[(x86_proc.id,False)] = [ > + self.i6, self.i7, self.i8, self.i9] > # x86 virtual > - self.builders[(1,True)] = [ > + self.builders[(x86_proc.id,True)] = [ > self.i1, self.i2, self.i3, self.i4, self.i5] > > # amd64 native > - self.builders[(2,True)] = [self.a4, self.a5] > + self.builders[(amd_proc.id,False)] = [self.a4, self.a5] > # amd64 virtual > - self.builders[(2,False)] = [self.a1, self.a2, self.a3] > + self.builders[(amd_proc.id,True)] = [self.a1, self.a2, self.a3] > > # hppa native > - self.builders[(3,True)] = [self.h5, self.h6, self.h7] > + self.builders[(hppa_proc.id,False)] = [self.h5, self.h6, self.h7] > # hppa virtual > - self.builders[(3,False)] = [self.h1, self.h2, self.h3, self.h4] > + self.builders[(hppa_proc.id,True)] = [ > + self.h1, self.h2, self.h3, self.h4] > > # Ensure all builders are operational. > for builders in self.builders.values(): > @@ -135,14 +182,13 @@ > getUtility(IBuilderSet)['frog'].builderok = False > > > -class TestBuilderData(TestBuildQueueBase): > +class SingleArchBuildsBase(TestBuildQueueBase): > """Test the retrieval of builder related data. 
The latter is required > for job dispatch time estimations irrespective of job processor > architecture and virtualization setting.""" > - > def setUp(self): > """Set up some native x86 builds for the test archive.""" > - super(TestBuilderData, self).setUp() > + super(SingleArchBuildsBase, self).setUp() > # The builds will be set up as follows: > # > # gedit, p: 386, v:False e:0:01:00 *** s: 1001 > @@ -214,6 +260,11 @@ > bq.estimated_duration = timedelta(seconds=duration) > # print_build_setup(self.builds) > > + > +class TestBuilderData(SingleArchBuildsBase): > + """Test the retrieval of builder related data. The latter is required > + for job dispatch time estimations irrespective of job processor > + architecture and virtualization setting.""" > def test_builder_data(self): > # Make sure the builder numbers are correct. The builder data will > # be the same for all of our builds. > @@ -226,26 +277,32 @@ > builders_for_job, 4, > "[1] The total number of builders that can build the job in " > "question is wrong.") > + processor_fam = ProcessorFamilySet().getByName('x86') > + x86_proc = processor_fam.processors[0] > self.assertEqual( > - builder_stats[(1,False)], 4, > + builder_stats[(x86_proc.id,False)], 4, Really, really minor nitpick here, but with all these changes to use x86_proc.id you need to add a space after the comma. > "The number of native x86 builders is wrong") > self.assertEqual( > - builder_stats[(1,True)], 5, > + builder_stats[(x86_proc.id,True)], 5, > "The number of virtual x86 builders is wrong") > + processor_fam = ProcessorFamilySet().getByName('amd64') > + amd_proc = processor_fam.processors[0] > self.assertEqual( > - builder_stats[(2,False)], 2, > + builder_stats[(amd_proc.id,False)], 2, > "The number of native amd64 builders is wrong") > self.assertEqual( > - builder_stats[(2,True)], 3, > + builder_stats[(amd_proc.id,True)], 3, > "The number of virtual amd64 builders is wrong") > + processor_fam = ProcessorFamilySet().getByName('hppa') > + hppa_proc = processor_fam.processors[0] > self.assertEqual( > - builder_stats[(3,False)], 3, > + builder_stats[(hppa_proc.id,False)], 3, > "The number of native hppa builders is wrong") > self.assertEqual( > - builder_stats[(3,True)], 4, > + builder_stats[(hppa_proc.id,True)], 4, > "The number of virtual hppa builders is wrong") > # Disable the native x86 builders. > - for builder in self.builders[(1,False)]: > + for builder in self.builders[(x86_proc.id,False)]: > builder.builderok = False > # Get the builder statistics again. > builder_data = bq._getBuilderData() > @@ -257,7 +314,7 @@ > "[2] The total number of builders that can build the job in " > "question is wrong.") > # Re-enable one of them. > - for builder in self.builders[(1,False)]: > + for builder in self.builders[(x86_proc.id,False)]: > builder.builderok = True > break > # Get the builder statistics again. > @@ -270,7 +327,7 @@ > "question is wrong.") > # Disable the *virtual* x86 builders -- should not make any > # difference. > - for builder in self.builders[(1,True)]: > + for builder in self.builders[(x86_proc.id,True)]: > builder.builderok = False > # Get the builder statistics again. 
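The same comma nitpick applies to the builders dict lookups just above (and wherever else these tuple keys appear), e.g.

    for builder in self.builders[(x86_proc.id, True)]: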
> builder_data = bq._getBuilderData() > @@ -330,3 +387,279 @@ > free_count = bq._freeBuildersCount( > build.processor, build.is_virtualized) > self.assertEqual(free_count, 1) > + > + > +class TestMinTimeToNextBuilder(SingleArchBuildsBase): > + """When is the next builder capable of running a given job becoming > + available?""" > + def test_min_time_to_next_builder(self): > + # Test the estimation of the minimum time until a builder becomes > + # available. > + > + # The builds will be set up as follows: > + # > + # gedit, p: 386, v:False e:0:01:00 *** s: 1001 > + # firefox, p: 386, v:False e:0:02:00 *** s: 1002 > + # apg, p: 386, v:False e:0:03:00 *** s: 1003 > + # vim, p: 386, v:False e:0:04:00 *** s: 1004 > + # gcc, p: 386, v:False e:0:05:00 *** s: 1005 > + # bison, p: 386, v:False e:0:06:00 *** s: 1006 > + # flex, p: 386, v:False e:0:07:00 *** s: 1007 > + # postgres, p: 386, v:False e:0:08:00 *** s: 1008 > + # > + # p=processor, v=virtualized, e=estimated_duration, s=score > + > + processor_fam = ProcessorFamilySet().getByName('x86') > + x86_proc = processor_fam.processors[0] > + # This will be the job of interest. > + apg_build, apg_job = find_job(self, 'apg') > + # One of four builders for the 'apg' build is immediately available. > + check_mintime_to_builder(self, apg_job, x86_proc, False, 0) > + > + # Assign the postgres job to a builder. > + assign_to_builder(self, 'postgres', 1) > + # Now one builder is gone. But there should still be a builder > + # immediately available. > + check_mintime_to_builder(self, apg_job, x86_proc, False, 0) > + > + assign_to_builder(self, 'flex', 2) > + check_mintime_to_builder(self, apg_job, x86_proc, False, 0) > + > + assign_to_builder(self, 'bison', 3) > + check_mintime_to_builder(self, apg_job, x86_proc, False, 0) > + > + assign_to_builder(self, 'gcc', 4) > + # Now that no builder is immediately available, the shortest > + # remaing build time (based on the estimated duration) is returned: > + # 300 seconds > + # This is equivalent to the 'gcc' job's estimated duration. > + check_mintime_to_builder(self, apg_job, x86_proc, False, 300) > + > + # Now we pretend that the 'postgres' started 6 minutes ago. Its > + # remaining execution time should be 2 minutes = 120 seconds and > + # it now becomes the job whose builder becomes available next. > + build, bq = find_job(self, 'postgres') > + set_remaining_time_for_running_job(bq, 120) > + check_mintime_to_builder(self, apg_job, x86_proc, False, 120) > + > + # What happens when jobs overdraw the estimated duration? Let's > + # pretend the 'flex' job started 8 minutes ago. > + build, bq = find_job(self, 'flex') > + set_remaining_time_for_running_job(bq, -60) > + # In such a case we assume that the job will complete within 2 > + # minutes, this is a guess that has worked well so far. > + check_mintime_to_builder(self, apg_job, x86_proc, False, 120) > + > + # If there's a job that will complete within a shorter time then > + # we expect to be given that time frame. > + build, bq = find_job(self, 'postgres') > + set_remaining_time_for_running_job(bq, 30) > + check_mintime_to_builder(self, apg_job, x86_proc, False, 30) > + > + # Disable the native x86 builders. > + for builder in self.builders[(1,False)]: > + builder.builderok = False > + > + # No builders capable of running the job at hand are available now, > + # this is indicated by a None value. 
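One more spot for the x86_proc.id treatment: a few lines up, the native x86 builders are still disabled via the literal key (1,False). Something like

    for builder in self.builders[(x86_proc.id, False)]:
        builder.builderok = False

would be more consistent now that the builders dict is keyed on the processor id, and it avoids assuming that that id happens to be 1.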
> + check_mintime_to_builder(self, apg_job, x86_proc, False, None) > + > + > +class MultiArchBuildsBase(TestBuildQueueBase): > + """Test dispatch time estimates for binary builds (i.e. single build > + farm job type) targetting a single processor architecture and the primary > + archive. > + """ > + def setUp(self): > + """Set up some native x86 builds for the test archive.""" > + super(MultiArchBuildsBase, self).setUp() > + # The builds will be set up as follows: > + # > + # gedit, p: hppa, v:False e:0:01:00 *** s: 1001 > + # gedit, p: 386, v:False e:0:02:00 *** s: 1002 > + # firefox, p: hppa, v:False e:0:03:00 *** s: 1003 > + # firefox, p: 386, v:False e:0:04:00 *** s: 1004 > + # apg, p: hppa, v:False e:0:05:00 *** s: 1005 > + # apg, p: 386, v:False e:0:06:00 *** s: 1006 > + # vim, p: hppa, v:False e:0:07:00 *** s: 1007 > + # vim, p: 386, v:False e:0:08:00 *** s: 1008 > + # gcc, p: hppa, v:False e:0:09:00 *** s: 1009 > + # gcc, p: 386, v:False e:0:10:00 *** s: 1010 > + # bison, p: hppa, v:False e:0:11:00 *** s: 1011 > + # bison, p: 386, v:False e:0:12:00 *** s: 1012 > + # flex, p: hppa, v:False e:0:13:00 *** s: 1013 > + # flex, p: 386, v:False e:0:14:00 *** s: 1014 > + # postgres, p: hppa, v:False e:0:15:00 *** s: 1015 > + # postgres, p: 386, v:False e:0:16:00 *** s: 1016 > + # > + # p=processor, v=virtualized, e=estimated_duration, s=score > + > + # First mark all builds in the sample data as already built. > + store = getUtility(IStoreSelector).get(MAIN_STORE, DEFAULT_FLAVOR) > + sample_data = store.find(Build) > + for build in sample_data: > + build.buildstate = BuildStatus.FULLYBUILT > + store.flush() > + > + # We test builds that target a primary archive. > + self.non_ppa = self.factory.makeArchive( > + name="primary", purpose=ArchivePurpose.PRIMARY) > + self.non_ppa.require_virtualized = False > + > + self.builds = [] > + self.builds.extend( > + self.publisher.getPubSource( > + sourcename="gedit", status=PackagePublishingStatus.PUBLISHED, > + archive=self.non_ppa, > + architecturehintlist='any').createMissingBuilds()) > + self.builds.extend( > + self.publisher.getPubSource( > + sourcename="firefox", > + status=PackagePublishingStatus.PUBLISHED, > + archive=self.non_ppa, > + architecturehintlist='any').createMissingBuilds()) > + self.builds.extend( > + self.publisher.getPubSource( > + sourcename="apg", status=PackagePublishingStatus.PUBLISHED, > + archive=self.non_ppa, > + architecturehintlist='any').createMissingBuilds()) > + self.builds.extend( > + self.publisher.getPubSource( > + sourcename="vim", status=PackagePublishingStatus.PUBLISHED, > + archive=self.non_ppa, > + architecturehintlist='any').createMissingBuilds()) > + self.builds.extend( > + self.publisher.getPubSource( > + sourcename="gcc", status=PackagePublishingStatus.PUBLISHED, > + archive=self.non_ppa, > + architecturehintlist='any').createMissingBuilds()) > + self.builds.extend( > + self.publisher.getPubSource( > + sourcename="bison", status=PackagePublishingStatus.PUBLISHED, > + archive=self.non_ppa, > + architecturehintlist='any').createMissingBuilds()) > + self.builds.extend( > + self.publisher.getPubSource( > + sourcename="flex", status=PackagePublishingStatus.PUBLISHED, > + archive=self.non_ppa, > + architecturehintlist='any').createMissingBuilds()) > + self.builds.extend( > + self.publisher.getPubSource( > + sourcename="postgres", > + status=PackagePublishingStatus.PUBLISHED, > + archive=self.non_ppa, > + architecturehintlist='any').createMissingBuilds()) > + # Set up the builds for test. 
> + score = 1000 > + duration = 0 > + for build in self.builds: > + score += 1 > + duration += 60 > + bq = build.buildqueue_record > + bq.lastscore = score > + bq.estimated_duration = timedelta(seconds=duration) > + # print_build_setup(self.builds) > + > + > +class TestMinTimeToNextBuilderMulti(MultiArchBuildsBase): > + """When is the next builder capable of running a given job becoming > + available?""" This docstring should belong to the method below, really, as it applies specifically to that test. > + def test_min_time_to_next_builder(self): > + processor_fam = ProcessorFamilySet().getByName('hppa') > + hppa_proc = processor_fam.processors[0] > + > + # One of four builders for the 'apg' build is immediately available. > + apg_build, apg_job = find_job(self, 'apg', 'hppa') > + check_mintime_to_builder(self, apg_job, hppa_proc, False, 0) > + > + # Assign the postgres job to a builder. > + assign_to_builder(self, 'postgres', 1, 'hppa') > + # Now one builder is gone. But there should still be a builder > + # immediately available. > + check_mintime_to_builder(self, apg_job, hppa_proc, False, 0) > + > + assign_to_builder(self, 'flex', 2, 'hppa') > + check_mintime_to_builder(self, apg_job, hppa_proc, False, 0) > + > + assign_to_builder(self, 'bison', 3, 'hppa') > + # Now that no builder is immediately available, the shortest > + # remaing build time (based on the estimated duration) is returned: > + # 660 seconds > + # This is equivalent to the 'bison' job's estimated duration. > + check_mintime_to_builder(self, apg_job, hppa_proc, False, 660) > + > + # Now we pretend that the 'postgres' started 13 minutes ago. Its > + # remaining execution time should be 2 minutes = 120 seconds and > + # it now becomes the job whose builder becomes available next. > + build, bq = find_job(self, 'postgres', 'hppa') > + set_remaining_time_for_running_job(bq, 120) > + check_mintime_to_builder(self, apg_job, hppa_proc, False, 120) > + > + # What happens when jobs overdraw the estimated duration? Let's > + # pretend the 'flex' job started 14 minutes ago. > + build, bq = find_job(self, 'flex', 'hppa') > + set_remaining_time_for_running_job(bq, -60) > + # In such a case we assume that the job will complete within 2 > + # minutes, this is a guess that has worked well so far. > + check_mintime_to_builder(self, apg_job, hppa_proc, False, 120) > + > + # If there's a job that will complete within a shorter time then > + # we expect to be given that time frame. > + build, bq = find_job(self, 'postgres', 'hppa') > + set_remaining_time_for_running_job(bq, 30) > + check_mintime_to_builder(self, apg_job, hppa_proc, False, 30) > + > + # Disable the native hppa builders. > + for builder in self.builders[(hppa_proc.id,False)]: > + builder.builderok = False > + > + # No builders capable of running the job at hand are available now, > + # this is indicated by a None value. > + check_mintime_to_builder(self, apg_job, hppa_proc, False, None) > + > + # Let's assume for the moment that the job at the head of the 'apg' > + # build queue is processor independent. In that case we'd ask for > + # *any* next available builder. > + self.assertTrue( > + bq._freeBuildersCount(None, None) > 0, > + "Builders are immediately available for jobs that don't care " > + "about processor architectures or virtualization") > + check_mintime_to_builder(self, apg_job, None, None, 0) > + > + # Let's disable all builders. 
> + for builders in self.builders.itervalues(): > + for builder in builders: > + builder.builderok = False > + > + # There are no builders capable of running even the processor > + # independent jobs now and that this is indicated by a None value. > + check_mintime_to_builder(self, apg_job, None, None, None) > + > + # Re-enable the native hppa builders. > + for builder in self.builders[(hppa_proc.id,False)]: > + builder.builderok = True > + > + # The builder that's becoming available next is the one that's > + # running the 'postgres' build. > + check_mintime_to_builder(self, apg_job, None, None, 30) > + > + # Make sure we'll find an x86 builder as well. > + processor_fam = ProcessorFamilySet().getByName('x86') > + x86_proc = processor_fam.processors[0] > + builder = self.builders[(x86_proc.id,False)][0] > + builder.builderok = True > + > + # Now this builder is the one that becomes available next (29 minutes > + # remaining build time). > + assign_to_builder(self, 'gcc', 1, '386') > + build, bq = find_job(self, 'gcc', '386') > + set_remaining_time_for_running_job(bq, 29) > + > + check_mintime_to_builder(self, apg_job, None, None, 29) > + > + # Make a second, idle x86 builder available. > + builder = self.builders[(x86_proc.id,False)][1] > + builder.builderok = True > + > + # That builder should be available immediately since it's idle. > + check_mintime_to_builder(self, apg_job, None, None, 0) >
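Re the docstring comment above, what I had in mind is simply moving it onto the test method (purely illustrative, not prescriptive):

    class TestMinTimeToNextBuilderMulti(MultiArchBuildsBase):

        def test_min_time_to_next_builder(self):
            """When is the next builder capable of running a given
            job becoming available?"""
            ...

The class itself could then carry a shorter, more general description.

While reading I also spotted a few tiny typos in docstrings/comments: "untils" in the _estimateTimeToNextBuilder docstring, "remaing" in a couple of test comments, and the almost_equal docstring presumably means "This is used to avoid spurious failures in time based tests." None of these are blockers, of course.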