Package buildbot :: Package status :: Module builder
[frames] | [no frames]

Source Code for Module buildbot.status.builder

   1  # This file is part of Buildbot.  Buildbot is free software: you can 
   2  # redistribute it and/or modify it under the terms of the GNU General Public 
   3  # License as published by the Free Software Foundation, version 2. 
   4  # 
   5  # This program is distributed in the hope that it will be useful, but WITHOUT 
   6  # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS 
   7  # FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more 
   8  # details. 
   9  # 
  10  # You should have received a copy of the GNU General Public License along with 
  11  # this program; if not, write to the Free Software Foundation, Inc., 51 
  12  # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
  13  # 
  14  # Copyright Buildbot Team Members 
  15   
  16   
  17  from zope.interface import implements 
  18  from twisted.python import log, runtime 
  19  from twisted.persisted import styles 
  20  from twisted.internet import reactor, defer, threads 
  21  import twisted.internet.interfaces 
  22  from twisted.protocols import basic 
  23  from buildbot.process.properties import Properties 
  24  from buildbot.util import collections 
  25  from buildbot.util.eventual import eventually 
  26   
  27  import weakref 
  28  import os, shutil, re, urllib, itertools 
  29  import gc 
  30  import time 
  31  from cPickle import load, dump 
  32  from cStringIO import StringIO 
  33  from bz2 import BZ2File 
  34  from gzip import GzipFile 
  35   
  36  # sibling imports 
  37  from buildbot import interfaces, util, sourcestamp 
  38   
  39  SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION, RETRY = range(6) 
  40  Results = ["success", "warnings", "failure", "skipped", "exception", "retry"] 
41 42 -def worst_status(a, b):
43 # SUCCESS > WARNINGS > FAILURE > EXCEPTION > RETRY 44 # Retry needs to be considered the worst so that conusmers don't have to 45 # worry about other failures undermining the RETRY. 46 for s in (RETRY, EXCEPTION, FAILURE, WARNINGS, SKIPPED, SUCCESS): 47 if s in (a, b): 48 return s
# build processes call the following methods:
#
#  setDefaults
#
#  currentlyBuilding
#  currentlyIdle
#  currentlyInterlocked
#  currentlyOffline
#  currentlyWaiting
#
#  setCurrentActivity
#  updateCurrentActivity
#  addFileToCurrentActivity
#  finishCurrentActivity
#
#  startBuild
#  finishBuild

# Log channels: every chunk of log output is tagged with one of these.
# The numeric values come from buildbot.interfaces.
STDOUT = interfaces.LOG_CHANNEL_STDOUT
STDERR = interfaces.LOG_CHANNEL_STDERR
HEADER = interfaces.LOG_CHANNEL_HEADER
# human-readable channel names, indexed by channel number
ChunkTypes = ["stdout", "stderr", "header"]
class NullAddress(object):
    "an address for NullTransport"
    # Declares that instances provide IAddress; carries no real peer info.
    implements(twisted.internet.interfaces.IAddress)
76
class NullTransport(object):
    "a do-nothing transport to make NetstringReceiver happy"
    implements(twisted.internet.interfaces.ITransport)

    def write(self, data):
        # nothing should ever be written through this transport
        raise NotImplementedError
    def writeSequence(self, data):
        raise NotImplementedError
    def loseConnection(self):
        pass
    def getPeer(self):
        # BUG FIX: ITransport.getPeer must return an IAddress provider;
        # the original returned the NullAddress *class*, not an instance.
        return NullAddress()
    def getHost(self):
        return NullAddress()
87
class LogFileScanner(basic.NetstringReceiver):
    """Parse a LogFile's on-disk netstring stream into (channel, text) chunks.

    Each netstring payload is a single decimal channel digit followed by the
    chunk text. chunk_cb is invoked with (channel, text) for every chunk
    whose channel is listed in 'channels' (or for every chunk when
    'channels' is empty).
    """
    # BUG FIX: the default for 'channels' used to be a mutable list ([]).
    # It was never mutated here, but an immutable tuple removes the shared
    # mutable-default pitfall; the membership test below behaves identically.
    def __init__(self, chunk_cb, channels=()):
        self.chunk_cb = chunk_cb
        self.channels = channels
        # NetstringReceiver requires a connected transport before it will
        # parse anything, so hook up a dummy one.
        self.makeConnection(NullTransport())

    def stringReceived(self, line):
        # first byte is the channel digit, the rest is the chunk text
        channel = int(line[0])
        if not self.channels or (channel in self.channels):
            self.chunk_cb((channel, line[1:]))
98
class LogFileProducer:
    """What's the plan?

    the LogFile has just one FD, used for both reading and writing.
    Each time you add an entry, fd.seek to the end and then write.

    Each reader (i.e. Producer) keeps track of their own offset. The reader
    starts by seeking to the start of the logfile, and reading forwards.
    Between each hunk of file they yield chunks, so they must remember their
    offset before yielding and re-seek back to that offset before reading
    more data. When their read() returns EOF, they're finished with the first
    phase of the reading (everything that's already been written to disk).

    After EOF, the remaining data is entirely in the current entries list.
    These entries are all of the same channel, so we can do one "".join and
    obtain a single chunk to be sent to the listener. But since that involves
    a yield, and more data might arrive after we give up control, we have to
    subscribe them before yielding. We can't subscribe them any earlier,
    otherwise they'd get data out of order.

    We're using a generator in the first place so that the listener can
    throttle us, which means they're pulling. But the subscription means
    we're pushing. Really we're a Producer. In the first phase we can be
    either a PullProducer or a PushProducer. In the second phase we're only a
    PushProducer.

    So the client gives a LogFileConsumer to File.subscribeConsumer . This
    Consumer must have registerProducer(), unregisterProducer(), and
    writeChunk(), and is just like a regular twisted.interfaces.IConsumer,
    except that writeChunk() takes chunks (tuples of (channel,text)) instead
    of the normal write() which takes just text. The LogFileConsumer is
    allowed to call stopProducing, pauseProducing, and resumeProducing on the
    producer instance it is given. """

    paused = False       # True while the consumer has us paused
    subscribed = False   # True once we're in logfile.watchers
    BUFFERSIZE = 2048    # disk read size per iteration

    def __init__(self, logfile, consumer):
        self.logfile = logfile
        self.consumer = consumer
        self.chunkGenerator = self.getChunks()
        # register as a push producer (True = streaming)
        consumer.registerProducer(self, True)

    def getChunks(self):
        """Generator yielding (channel, text) chunks: first everything on
        disk, then the not-yet-merged runEntries; subscribes self as a
        watcher in between so no chunk is lost or duplicated."""
        f = self.logfile.getFile()
        offset = 0
        chunks = []
        p = LogFileScanner(chunks.append)
        f.seek(offset)
        data = f.read(self.BUFFERSIZE)
        offset = f.tell()
        while data:
            p.dataReceived(data)
            while chunks:
                c = chunks.pop(0)
                yield c
            # re-seek: the LogFile may have moved the shared file position
            # (e.g. merge() seeks to the end) while we were yielding
            f.seek(offset)
            data = f.read(self.BUFFERSIZE)
            offset = f.tell()
        del f

        # now subscribe them to receive new entries
        self.subscribed = True
        self.logfile.watchers.append(self)
        d = self.logfile.waitUntilFinished()

        # then give them the not-yet-merged data
        if self.logfile.runEntries:
            channel = self.logfile.runEntries[0][0]
            text = "".join([c[1] for c in self.logfile.runEntries])
            yield (channel, text)

        # now we've caught up to the present. Anything further will come from
        # the logfile subscription. We add the callback *after* yielding the
        # data from runEntries, because the logfile might have finished
        # during the yield.
        d.addCallback(self.logfileFinished)

    def stopProducing(self):
        # TODO: should we still call consumer.finish? probably not.
        self.paused = True
        self.consumer = None
        self.done()

    def done(self):
        """Stop generating chunks and drop the watcher subscription."""
        if self.chunkGenerator:
            self.chunkGenerator = None # stop making chunks
        if self.subscribed:
            self.logfile.watchers.remove(self)
            self.subscribed = False

    def pauseProducing(self):
        self.paused = True

    def resumeProducing(self):
        # Twisted-1.3.0 has a bug which causes hangs when resumeProducing
        # calls transport.write (there is a recursive loop, fixed in 2.0 in
        # t.i.abstract.FileDescriptor.doWrite by setting the producerPaused
        # flag *before* calling resumeProducing). To work around this, we
        # just put off the real resumeProducing for a moment. This probably
        # has a performance hit, but I'm going to assume that the log files
        # are not retrieved frequently enough for it to be an issue.

        eventually(self._resumeProducing)

    def _resumeProducing(self):
        """Pump chunks to the consumer until paused or exhausted."""
        self.paused = False
        if not self.chunkGenerator:
            return
        try:
            while not self.paused:
                chunk = self.chunkGenerator.next()
                self.consumer.writeChunk(chunk)
            # we exit this when the consumer says to stop, or we run out
            # of chunks
        except StopIteration:
            # if the generator finished, it will have done releaseFile
            self.chunkGenerator = None
        # now everything goes through the subscription, and they don't get to
        # pause anymore

    def logChunk(self, build, step, logfile, channel, chunk):
        # watcher callback: live data from the LogFile after catch-up
        if self.consumer:
            self.consumer.writeChunk((channel, chunk))

    def logfileFinished(self, logfile):
        # waitUntilFinished callback: tear down and notify the consumer
        self.done()
        if self.consumer:
            self.consumer.unregisterProducer()
            self.consumer.finish()
            self.consumer = None
232 -def _tryremove(filename, timeout, retries):
233 """Try to remove a file, and if failed, try again in timeout. 234 Increases the timeout by a factor of 4, and only keeps trying for 235 another retries-amount of times. 236 237 """ 238 try: 239 os.unlink(filename) 240 except OSError: 241 if retries > 0: 242 reactor.callLater(timeout, _tryremove, filename, timeout * 4, 243 retries - 1) 244 else: 245 log.msg("giving up on removing %s after over %d seconds" % 246 (filename, timeout))
247
class LogFile:
    """A LogFile keeps all of its contents on disk, in a non-pickle format to
    which new entries can easily be appended. The file on disk has a name
    like 12-log-compile-output, under the Builder's directory. The actual
    filename is generated (before the LogFile is created) by
    L{BuildStatus.generateLogfileName}.

    Old LogFile pickles (which kept their contents in .entries) must be
    upgraded. The L{BuilderStatus} is responsible for doing this, when it
    loads the L{BuildStatus} into memory. The Build pickle is not modified,
    so users who go from 0.6.5 back to 0.6.4 don't have to lose their
    logs."""

    implements(interfaces.IStatusLog, interfaces.ILogFile)

    # class-level defaults; __init__ assigns per-instance values, so these
    # are mostly seen by old unpickled instances.
    finished = False
    length = 0
    nonHeaderLength = 0
    tailLength = 0
    chunkSize = 10*1000
    runLength = 0
    # No max size by default
    logMaxSize = None
    # Don't keep a tail buffer by default
    logMaxTailSize = None
    maxLengthExceeded = False
    runEntries = [] # provided so old pickled builds will getChunks() ok
    entries = None
    BUFFERSIZE = 2048
    filename = None # relative to the Builder's basedir
    openfile = None
    compressMethod = "bz2"

    def __init__(self, parent, name, logfilename):
        """
        @type  parent: L{BuildStepStatus}
        @param parent: the Step that this log is a part of
        @type  name: string
        @param name: the name of this log, typically 'output'
        @type  logfilename: string
        @param logfilename: the Builder-relative pathname for the saved entries
        """
        self.step = parent
        self.name = name
        self.filename = logfilename
        fn = self.getFilename()
        if os.path.exists(fn):
            # the buildmaster was probably stopped abruptly, before the
            # BuilderStatus could be saved, so BuilderStatus.nextBuildNumber
            # is out of date, and we're overlapping with earlier builds now.
            # Warn about it, but then overwrite the old pickle file
            log.msg("Warning: Overwriting old serialized Build at %s" % fn)
        dirname = os.path.dirname(fn)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        # single FD shared by writer and catch-up readers (see getFile)
        self.openfile = open(fn, "w+")
        self.runEntries = []
        self.watchers = []
        self.finishedWatchers = []
        self.tailBuffer = []

    def getFilename(self):
        # absolute path: Builder's basedir + the Builder-relative filename
        return os.path.join(self.step.build.builder.basedir, self.filename)

    def hasContents(self):
        # the log may exist raw or in either compressed form
        return os.path.exists(self.getFilename() + '.bz2') or \
            os.path.exists(self.getFilename() + '.gz') or \
            os.path.exists(self.getFilename())

    def getName(self):
        return self.name

    def getStep(self):
        return self.step

    def isFinished(self):
        return self.finished

    def waitUntilFinished(self):
        """Return a Deferred that fires (with self) when finish() runs."""
        if self.finished:
            d = defer.succeed(self)
        else:
            d = defer.Deferred()
            self.finishedWatchers.append(d)
        return d

    def getFile(self):
        """Return a file object for reading this log; while the log is
        still open this is the shared write handle, so callers must
        seek back after use and must not close it."""
        if self.openfile:
            # this is the filehandle we're using to write to the log, so
            # don't close it!
            return self.openfile
        # otherwise they get their own read-only handle
        # try a compressed log first
        try:
            return BZ2File(self.getFilename() + ".bz2", "r")
        except IOError:
            pass
        try:
            return GzipFile(self.getFilename() + ".gz", "r")
        except IOError:
            pass
        return open(self.getFilename(), "r")

    def getText(self):
        # this produces one ginormous string
        return "".join(self.getChunks([STDOUT, STDERR], onlyText=True))

    def getTextWithHeaders(self):
        # like getText, but HEADER chunks are included too
        return "".join(self.getChunks(onlyText=True))

    # NOTE(review): mutable default argument 'channels=[]' -- never mutated
    # here, so it is harmless in practice, but a tuple default would be safer.
    def getChunks(self, channels=[], onlyText=False):
        # generate chunks for everything that was logged at the time we were
        # first called, so remember how long the file was when we started.
        # Don't read beyond that point. The current contents of
        # self.runEntries will follow.

        # this returns an iterator, which means arbitrary things could happen
        # while we're yielding. This will faithfully deliver the log as it
        # existed when it was started, and not return anything after that
        # point. To use this in subscribe(catchup=True) without missing any
        # data, you must insure that nothing will be added to the log during
        # yield() calls.

        f = self.getFile()
        if not self.finished:
            offset = 0
            f.seek(0, 2)
            remaining = f.tell()
        else:
            offset = 0
            remaining = None

        leftover = None
        if self.runEntries and (not channels or
                                (self.runEntries[0][0] in channels)):
            leftover = (self.runEntries[0][0],
                        "".join([c[1] for c in self.runEntries]))

        # freeze the state of the LogFile by passing a lot of parameters into
        # a generator
        return self._generateChunks(f, offset, remaining, leftover,
                                    channels, onlyText)

    def _generateChunks(self, f, offset, remaining, leftover,
                        channels, onlyText):
        """Generator backing getChunks(); reads at most 'remaining' bytes
        from f (all of it when remaining is None), then yields 'leftover'."""
        chunks = []
        p = LogFileScanner(chunks.append, channels)
        f.seek(offset)
        if remaining is not None:
            data = f.read(min(remaining, self.BUFFERSIZE))
            remaining -= len(data)
        else:
            data = f.read(self.BUFFERSIZE)

        offset = f.tell()
        while data:
            p.dataReceived(data)
            while chunks:
                channel, text = chunks.pop(0)
                if onlyText:
                    yield text
                else:
                    yield (channel, text)
            # re-seek each time: f may be the shared write handle, and the
            # writer moves the file position between our reads
            f.seek(offset)
            if remaining is not None:
                data = f.read(min(remaining, self.BUFFERSIZE))
                remaining -= len(data)
            else:
                data = f.read(self.BUFFERSIZE)
            offset = f.tell()
        del f

        if leftover:
            if onlyText:
                yield leftover[1]
            else:
                yield leftover

    def readlines(self, channel=STDOUT):
        """Return an iterator that produces newline-terminated lines,
        excluding header chunks."""
        # TODO: make this memory-efficient, by turning it into a generator
        # that retrieves chunks as necessary, like a pull-driven version of
        # twisted.protocols.basic.LineReceiver
        alltext = "".join(self.getChunks([channel], onlyText=True))
        io = StringIO(alltext)
        return io.readlines()

    def subscribe(self, receiver, catchup):
        """Add receiver to the watcher list; when catchup is true, replay
        everything logged so far via receiver.logChunk first."""
        if self.finished:
            return
        self.watchers.append(receiver)
        if catchup:
            for channel, text in self.getChunks():
                # TODO: add logChunks(), to send over everything at once?
                receiver.logChunk(self.step.build, self.step, self,
                                  channel, text)

    def unsubscribe(self, receiver):
        if receiver in self.watchers:
            self.watchers.remove(receiver)

    def subscribeConsumer(self, consumer):
        """Like subscribe, but flow-controlled through a LogFileProducer."""
        p = LogFileProducer(self, consumer)
        p.resumeProducing()

    # interface used by the build steps to add things to the log

    def merge(self):
        # merge all .runEntries (which are all of the same type) into a
        # single chunk for .entries
        if not self.runEntries:
            return
        channel = self.runEntries[0][0]
        text = "".join([c[1] for c in self.runEntries])
        # the on-disk netstring payload starts with a single channel digit,
        # so the channel must fit in one character
        assert channel < 10
        f = self.openfile
        f.seek(0, 2)
        offset = 0
        while offset < len(text):
            size = min(len(text)-offset, self.chunkSize)
            # netstring framing: "<len>:<channel digit><text>,"
            f.write("%d:%d" % (1 + size, channel))
            f.write(text[offset:offset+size])
            f.write(",")
            offset += size
        self.runEntries = []
        self.runLength = 0

    def addEntry(self, channel, text):
        """Append one chunk to the log; enforces logMaxSize truncation
        (with optional logMaxTailSize tail buffering) for non-HEADER
        channels, and notifies all watchers."""
        assert not self.finished

        # Python 2: store everything as utf-8 bytes
        if isinstance(text, unicode):
            text = text.encode('utf-8')
        if channel != HEADER:
            # Truncate the log if it's more than logMaxSize bytes
            if self.logMaxSize and self.nonHeaderLength > self.logMaxSize:
                # Add a message about what's going on
                if not self.maxLengthExceeded:
                    msg = "\nOutput exceeded %i bytes, remaining output has been truncated\n" % self.logMaxSize
                    self.addEntry(HEADER, msg)
                    self.merge()
                    self.maxLengthExceeded = True

                if self.logMaxTailSize:
                    # Update the tail buffer
                    self.tailBuffer.append((channel, text))
                    self.tailLength += len(text)
                    while self.tailLength > self.logMaxTailSize:
                        # Drop some stuff off the beginning of the buffer
                        c,t = self.tailBuffer.pop(0)
                        n = len(t)
                        self.tailLength -= n
                        assert self.tailLength >= 0
                # truncated: the chunk goes (at most) to the tail buffer
                return

            self.nonHeaderLength += len(text)

        # we only add to .runEntries here. merge() is responsible for adding
        # merged chunks to .entries
        if self.runEntries and channel != self.runEntries[0][0]:
            self.merge()
        self.runEntries.append((channel, text))
        self.runLength += len(text)
        if self.runLength >= self.chunkSize:
            self.merge()

        for w in self.watchers:
            w.logChunk(self.step.build, self.step, self, channel, text)
        self.length += len(text)

    def addStdout(self, text):
        self.addEntry(STDOUT, text)

    def addStderr(self, text):
        self.addEntry(STDERR, text)

    def addHeader(self, text):
        self.addEntry(HEADER, text)

    def finish(self):
        """Flush everything to disk, mark the log finished, and fire the
        waitUntilFinished() Deferreds."""
        if self.tailBuffer:
            # a truncated log: write a header, then the buffered tail
            msg = "\nFinal %i bytes follow below:\n" % self.tailLength
            tmp = self.runEntries
            self.runEntries = [(HEADER, msg)]
            self.merge()
            self.runEntries = self.tailBuffer
            self.merge()
            self.runEntries = tmp
            self.merge()
            self.tailBuffer = []
        else:
            self.merge()

        if self.openfile:
            # we don't do an explicit close, because there might be readers
            # sharing the filehandle. As soon as they stop reading, the
            # filehandle will be released and automatically closed.
            self.openfile.flush()
            del self.openfile
        self.finished = True
        watchers = self.finishedWatchers
        self.finishedWatchers = []
        for w in watchers:
            w.callback(self)
        self.watchers = []

    def compressLog(self):
        """Compress the on-disk log in a worker thread; returns a Deferred.

        NOTE(review): the comment below promises a bail-out, but there is
        none -- a compressMethod other than "bz2"/"gz" leaves 'compressed'
        unbound and raises UnboundLocalError. Confirm callers only ever set
        those two values.
        """
        # bail out if there's no compression support
        if self.compressMethod == "bz2":
            compressed = self.getFilename() + ".bz2.tmp"
        elif self.compressMethod == "gz":
            compressed = self.getFilename() + ".gz.tmp"
        d = threads.deferToThread(self._compressLog, compressed)
        d.addCallback(self._renameCompressedLog, compressed)
        d.addErrback(self._cleanupFailedCompress, compressed)
        return d

    def _compressLog(self, compressed):
        # runs in a worker thread: copy the raw log into the .tmp compressed
        # file in 1MB slices
        infile = self.getFile()
        if self.compressMethod == "bz2":
            cf = BZ2File(compressed, 'w')
        elif self.compressMethod == "gz":
            cf = GzipFile(compressed, 'w')
        bufsize = 1024*1024
        while True:
            buf = infile.read(bufsize)
            cf.write(buf)
            if len(buf) < bufsize:
                break
        cf.close()

    def _renameCompressedLog(self, rv, compressed):
        # move the finished .tmp file into place, then delete the raw log
        if self.compressMethod == "bz2":
            filename = self.getFilename() + '.bz2'
        else:
            filename = self.getFilename() + '.gz'
        if runtime.platformType == 'win32':
            # windows cannot rename a file on top of an existing one, so
            # fall back to delete-first. There are ways this can fail and
            # lose the builder's history, so we avoid using it in the
            # general (non-windows) case
            if os.path.exists(filename):
                os.unlink(filename)
        os.rename(compressed, filename)
        _tryremove(self.getFilename(), 1, 5)

    def _cleanupFailedCompress(self, failure, compressed):
        log.msg("failed to compress %s" % self.getFilename())
        if os.path.exists(compressed):
            _tryremove(compressed, 1, 5)
        failure.trap() # reraise the failure

    # persistence stuff
    def __getstate__(self):
        # strip everything that cannot (or should not) be pickled
        d = self.__dict__.copy()
        del d['step'] # filled in upon unpickling
        del d['watchers']
        del d['finishedWatchers']
        d['entries'] = [] # let 0.6.4 tolerate the saved log. TODO: really?
        # 'finished' and 'openfile' may be class attributes rather than
        # instance attributes, hence the has_key guards (Python 2 idiom)
        if d.has_key('finished'):
            del d['finished']
        if d.has_key('openfile'):
            del d['openfile']
        return d

    def __setstate__(self, d):
        self.__dict__ = d
        self.watchers = [] # probably not necessary
        self.finishedWatchers = [] # same
        # self.step must be filled in by our parent
        self.finished = True

    def upgrade(self, logfilename):
        """Save our .entries to a new-style offline log file (if necessary),
        and modify our in-memory representation to use it. The original
        pickled LogFile (inside the pickled Build) won't be modified."""
        self.filename = logfilename
        if not os.path.exists(self.getFilename()):
            self.openfile = open(self.getFilename(), "w")
            self.finished = False
            for channel,text in self.entries:
                self.addEntry(channel, text)
            self.finish() # releases self.openfile, which will be closed
        del self.entries
628
class HTMLLogFile:
    """A log whose entire content is a single in-memory HTML blob.

    Unlike LogFile nothing lives on disk and nothing streams: the log is
    complete from the moment it is constructed.
    """
    implements(interfaces.IStatusLog)

    filename = None

    def __init__(self, parent, name, logfilename, html):
        self.step = parent
        self.name = name
        self.filename = logfilename
        self.html = html

    def getName(self):
        # set in BuildStepStatus.addLog
        return self.name

    def getStep(self):
        return self.step

    def isFinished(self):
        """Always true: an HTML log is born finished."""
        return True

    def waitUntilFinished(self):
        """Fire immediately, since the log is already complete."""
        return defer.succeed(self)

    def hasContents(self):
        """Always true: the content is held in memory."""
        return True

    def getText(self):
        # the raw HTML looks kinda like text
        return self.html

    def getTextWithHeaders(self):
        return self.html

    def getChunks(self):
        # present the whole blob as a single STDERR chunk
        return [(STDERR, self.html)]

    def subscribe(self, receiver, catchup):
        """No-op: nothing will ever be appended."""
        pass

    def unsubscribe(self, receiver):
        """No-op: subscribe() never registers anything."""
        pass

    def finish(self):
        """No-op: already finished."""
        pass

    def __getstate__(self):
        # drop the backlink to the step before pickling
        state = dict(self.__dict__)
        del state['step']
        return state

    def upgrade(self, logfilename):
        """No-op: there is no on-disk representation to upgrade."""
        pass
674
class Event:
    """A start/finish event on a builder's timeline (IStatusEvent)."""
    implements(interfaces.IStatusEvent)

    started = None   # start timestamp, or None if not yet started
    finished = None  # finish timestamp, or None while in progress
    text = []        # short description strings

    # IStatusEvent methods
    def getTimes(self):
        """Return the (started, finished) timestamp pair."""
        return self.started, self.finished

    def getText(self):
        """Return the short description strings for this event."""
        return self.text

    def getLogs(self):
        """Events carry no logs; always an empty list."""
        return []

    def finish(self):
        """Stamp the event as finished right now."""
        self.finished = util.now()
693
class TestResult:
    """The outcome of a single test: name tuple, result code, text, logs."""
    implements(interfaces.ITestResult)

    def __init__(self, name, results, text, logs):
        # test names are hierarchical tuples, never plain strings
        assert isinstance(name, tuple)
        self.name = name
        self.results = results
        self.text = text
        self.logs = logs

    def getName(self):
        """Return the test's name tuple."""
        return self.name

    def getResults(self):
        """Return the result code passed to the constructor."""
        return self.results

    def getText(self):
        """Return the short descriptive text."""
        return self.text

    def getLogs(self):
        """Return the logs object passed to the constructor."""
        return self.logs
715
class BuildSetStatus:
    """Status view of one buildset, backed by the status db
    (implements IBuildSetStatus)."""
    implements(interfaces.IBuildSetStatus)

    def __init__(self, bsid, status, db):
        # bsid: the buildset's id in the status db
        # status: the master Status object (used for wait/subscribe helpers)
        # db: the status database connector
        self.id = bsid
        self.status = status
        self.db = db

    def _get_info(self):
        # returns (external_idstring, reason, ssid, complete, results);
        # re-queried on every call, so accessors always see fresh values
        return self.db.get_buildset_info(self.id)
    # methods for our clients

    def getSourceStamp(self):
        """Return the SourceStamp this buildset was built from."""
        (external_idstring, reason, ssid, complete, results) = self._get_info()
        return self.db.getSourceStampNumberedNow(ssid)

    def getReason(self):
        """Return the reason string the buildset was submitted with."""
        (external_idstring, reason, ssid, complete, results) = self._get_info()
        return reason

    def getResults(self):
        """Return the overall results value stored for this buildset."""
        (external_idstring, reason, ssid, complete, results) = self._get_info()
        return results

    def getID(self):
        """Return the external idstring (caller-supplied identifier)."""
        (external_idstring, reason, ssid, complete, results) = self._get_info()
        return external_idstring
743
745 brs = {} 746 brids = self.db.get_buildrequestids_for_buildset(self.id) 747 for (buildername, brid) in brids.items(): 748 brs[buildername] = BuildRequestStatus(brid, self.status, self.db) 749 return brs
750
751 - def getBuilderNames(self):
752 brs = self.db.get_buildrequestids_for_buildset(self.id) 753 return sorted(brs.keys())
754
755 - def getBuildRequests(self):
756 brs = self.db.get_buildrequestids_for_buildset(self.id) 757 return [BuildRequestStatus(brid, self.status, self.db) 758 for brid in brs.values()]
759
    def isFinished(self):
        """Return the buildset's 'complete' flag from the db."""
        (external_idstring, reason, ssid, complete, results) = self._get_info()
        return complete

    def waitUntilSuccess(self):
        # delegated to the master Status; presumably fires once the buildset
        # can no longer fail -- confirm against Status._buildset_waitUntilSuccess
        return self.status._buildset_waitUntilSuccess(self.id)

    def waitUntilFinished(self):
        # delegated to the master Status; fires when the buildset completes
        return self.status._buildset_waitUntilFinished(self.id)
768
class BuildRequestStatus:
    """Status view of a single build request, backed by the status db
    (implements IBuildRequestStatus)."""
    implements(interfaces.IBuildRequestStatus)

    def __init__(self, brid, status, db):
        # brid: build request id; status: master Status; db: status database
        self.brid = brid
        self.status = status
        self.db = db
        # BUG FIX: buildStarted() appends to self.builds, but no code ever
        # initialized it, so the first call raised AttributeError.
        self.builds = []

    def buildStarted(self, build):
        """Record that a build satisfying this request has started."""
        self.status._buildrequest_buildStarted(build.status)
        self.builds.append(build.status)

    # methods called by our clients
    def getSourceStamp(self):
        """Return the SourceStamp this request was made for."""
        br = self.db.getBuildRequestWithNumber(self.brid)
        return br.source

    def getBuilderName(self):
        """Return the name of the builder this request targets."""
        br = self.db.getBuildRequestWithNumber(self.brid)
        return br.builderName

    def getBuilds(self):
        """Return the BuildStatus objects (in build-number order) for the
        builds that have been run to satisfy this request."""
        builder = self.status.getBuilder(self.getBuilderName())
        builds = []
        buildnums = sorted(self.db.get_buildnums_for_brid(self.brid))
        for buildnum in buildnums:
            bs = builder.getBuild(buildnum)
            # getBuild may return None (e.g. build pickle not available)
            if bs:
                builds.append(bs)
        return builds

    def subscribe(self, observer):
        """Invoke observer(buildstatus) for each existing build, then for
        every future build started for this request."""
        oldbuilds = self.getBuilds()
        for bs in oldbuilds:
            eventually(observer, bs)
        self.status._buildrequest_subscribe(self.brid, observer)

    def unsubscribe(self, observer):
        """Stop notifying observer about new builds for this request."""
        self.status._buildrequest_unsubscribe(self.brid, observer)

    def getSubmitTime(self):
        """Return the time this request was submitted."""
        br = self.db.getBuildRequestWithNumber(self.brid)
        return br.submittedAt

    def asDict(self):
        """Return a JSON-able dict describing this request."""
        result = {}
        # Constant
        result['source'] = self.getSourceStamp().asDict()
        result['builderName'] = self.getBuilderName()
        result['submittedAt'] = self.getSubmitTime()

        # Transient
        result['builds'] = [build.asDict() for build in self.getBuilds()]
        return result
820
class BuildStepStatus(styles.Versioned):
    """
    I represent a collection of output status for a
    L{buildbot.process.step.BuildStep}.

    Statistics contain any information gleaned from a step that is
    not in the form of a logfile. As an example, steps that run
    tests might gather statistics about the number of passed, failed,
    or skipped tests.

    @type progress: L{buildbot.status.progress.StepProgress}
    @cvar progress: tracks ETA for the step
    @type text: list of strings
    @cvar text: list of short texts that describe the command and its status
    @type text2: list of strings
    @cvar text2: list of short texts added to the overall build description
    @type logs: dict of string -> L{buildbot.status.builder.LogFile}
    @ivar logs: logs of steps
    @type statistics: dict
    @ivar statistics: results from running this step
    """
    # note that these are created when the Build is set up, before each
    # corresponding BuildStep has started.
    implements(interfaces.IBuildStepStatus, interfaces.IStatusEvent)

    persistenceVersion = 3
    persistenceForgets = ( 'wasUpgraded', )

    # class-level defaults; __init__ replaces the mutable ones with fresh
    # per-instance objects, so these are mainly seen by old unpickled steps.
    started = None
    finished = None
    progress = None
    text = []
    results = (None, [])
    text2 = []
    watchers = []
    updates = {}
    finishedWatchers = []
    statistics = {}
    step_number = None
    def __init__(self, parent, step_number):
        # parent must be adaptable to IBuildStatus; the adaptation call
        # fails loudly otherwise
        assert interfaces.IBuildStatus(parent)
        self.build = parent
        self.step_number = step_number
        self.logs = []
        self.urls = {}
        self.watchers = []
        self.updates = {}
        self.finishedWatchers = []
        self.statistics = {}
        self.skipped = False

        self.waitingForLocks = False
875
    def getName(self):
        """Returns a short string with the name of this step. This string
        may have spaces in it."""
        return self.name

    def getBuild(self):
        """Return the BuildStatus this step belongs to."""
        return self.build

    def getTimes(self):
        """Return the (started, finished) timestamp pair."""
        return (self.started, self.finished)

    def getExpectations(self):
        """Returns a list of tuples (name, current, target)."""
        if not self.progress:
            return []
        ret = []
        # Python 2: dict.keys() returns a list, so in-place sort works
        metrics = self.progress.progress.keys()
        metrics.sort()
        for m in metrics:
            t = (m, self.progress.progress[m], self.progress.expectations[m])
            ret.append(t)
        return ret

    def getLogs(self):
        """Return the list of LogFile/HTMLLogFile objects for this step."""
        return self.logs

    def getURLs(self):
        """Return a copy of the name->URL mapping added via addURL()."""
        return self.urls.copy()

    def isStarted(self):
        return (self.started is not None)

    def isSkipped(self):
        return self.skipped

    def isFinished(self):
        return (self.finished is not None)

    def waitUntilFinished(self):
        """Return a Deferred that fires (with self) when the step finishes."""
        if self.finished:
            d = defer.succeed(self)
        else:
            d = defer.Deferred()
            self.finishedWatchers.append(d)
        return d
921 922 # while the step is running, the following methods make sense. 923 # Afterwards they return None 924
    def getETA(self):
        """Return the estimated remaining time, or None when it cannot be
        predicted (not started, already finished, or no progress tracker)."""
        if self.started is None:
            return None # not started yet
        if self.finished is not None:
            return None # already finished
        if not self.progress:
            return None # no way to predict
        return self.progress.remaining()

    # Once you know the step has finished, the following methods are legal.
    # Before this step has finished, they all return None.

    def getText(self):
        """Returns a list of strings which describe the step. These are
        intended to be displayed in a narrow column. If more space is
        available, the caller should join them together with spaces before
        presenting them to the user."""
        return self.text

    def getResults(self):
        """Return a tuple describing the results of the step.
        'result' is one of the constants in L{buildbot.status.builder}:
        SUCCESS, WARNINGS, FAILURE, or SKIPPED.
        'strings' is an optional list of strings that the step wants to
        append to the overall build's results. These strings are usually
        more terse than the ones returned by getText(): in particular,
        successful Steps do not usually contribute any text to the
        overall build.

        @rtype: tuple of int, list of strings
        @returns: (result, strings)
        """
        return (self.results, self.text2)
958
959 - def hasStatistic(self, name):
960 """Return true if this step has a value for the given statistic. 961 """ 962 return self.statistics.has_key(name)
963
964 - def getStatistic(self, name, default=None):
965 """Return the given statistic, if present 966 """ 967 return self.statistics.get(name, default)
968 969 # subscription interface 970
    # subscription interface

    def subscribe(self, receiver, updateInterval=10):
        """Register receiver for logStarted/logFinished/stepETAUpdate
        callbacks; the first ETA update is sent immediately and then every
        updateInterval seconds."""
        # will get logStarted, logFinished, stepETAUpdate
        assert receiver not in self.watchers
        self.watchers.append(receiver)
        self.sendETAUpdate(receiver, updateInterval)

    def sendETAUpdate(self, receiver, updateInterval):
        """Deliver one stepETAUpdate to receiver and reschedule the next."""
        self.updates[receiver] = None
        # they might unsubscribe during stepETAUpdate
        receiver.stepETAUpdate(self.build, self,
                               self.getETA(), self.getExpectations())
        if receiver in self.watchers:
            self.updates[receiver] = reactor.callLater(updateInterval,
                                                       self.sendETAUpdate,
                                                       receiver,
                                                       updateInterval)

    def unsubscribe(self, receiver):
        """Remove receiver and cancel any pending ETA-update timer."""
        if receiver in self.watchers:
            self.watchers.remove(receiver)
        if receiver in self.updates:
            if self.updates[receiver] is not None:
                self.updates[receiver].cancel()
            del self.updates[receiver]
995 996 997 # methods to be invoked by the BuildStep 998
def setName(self, stepname):
    """Record the step's name (used in log filenames and displays)."""
    self.name = stepname
def setColor(self, color):
    """Deprecated no-op kept for API compatibility: logs and ignores."""
    log.msg("BuildStepStatus.setColor is no longer supported -- ignoring color %s" % (color,))
def setProgress(self, stepprogress):
    """Attach the StepProgress object used for ETA calculations."""
    self.progress = stepprogress
def stepStarted(self):
    """Record the start time and notify the parent build, if any."""
    self.started = util.now()
    build = self.build
    if build:
        build.stepStarted(self)
def addLog(self, name):
    """Create, register and return a new LogFile for this step, wiring
    it up to all subscribed watchers.

    Fixes: the local variable was named 'log', shadowing the
    module-level twisted 'log'; and the per-watcher unsubscribe lambda
    captured 'receiver' late (all lambdas would have seen the last
    watcher's receiver), so it is now bound as a default argument.
    """
    assert self.started # addLog before stepStarted won't notify watchers
    logfilename = self.build.generateLogfileName(self.name, name)
    loog = LogFile(self, name, logfilename)
    loog.logMaxSize = self.build.builder.logMaxSize
    loog.logMaxTailSize = self.build.builder.logMaxTailSize
    loog.compressMethod = self.build.builder.logCompressionMethod
    self.logs.append(loog)
    for w in self.watchers:
        receiver = w.logStarted(self.build, self, loog)
        if receiver:
            loog.subscribe(receiver, True)
            d = loog.waitUntilFinished()
            # bind receiver now, not at callback time
            d.addCallback(lambda l, r=receiver: l.unsubscribe(r))
    d = loog.waitUntilFinished()
    d.addCallback(self.logFinished)
    return loog
def addHTMLLog(self, name, html):
    """Attach an already-complete HTML log; watchers see it start and
    finish in a single pass."""
    assert self.started # addLog before stepStarted won't notify watchers
    filename = self.build.generateLogfileName(self.name, name)
    htmllog = HTMLLogFile(self, name, filename, html)
    self.logs.append(htmllog)
    for watcher in self.watchers:
        watcher.logStarted(self.build, self, htmllog)
        watcher.logFinished(self.build, self, htmllog)
def logFinished(self, log):
    """Relay a log-finished notification to every subscribed watcher."""
    for watcher in self.watchers:
        watcher.logFinished(self.build, self, log)
def addURL(self, name, url):
    """Associate a named URL with this step (shown in status displays)."""
    self.urls[name] = url
def setText(self, text):
    """Replace the step's description text and notify watchers."""
    self.text = text
    for watcher in self.watchers:
        watcher.stepTextChanged(self.build, self, text)
def setText2(self, text):
    """Replace the step's contribution to the build text; notify watchers."""
    self.text2 = text
    for watcher in self.watchers:
        watcher.stepText2Changed(self.build, self, text)
def setStatistic(self, name, value):
    """Set the given statistic. Usually called by subclasses."""
    self.statistics[name] = value
def setSkipped(self, skipped):
    """Record whether this step was skipped."""
    self.skipped = skipped
def stepFinished(self, results):
    # Mark the step finished: record results, close any still-open logs,
    # start log compression where configured, cancel pending ETA timers,
    # and fire the waitUntilFinished() deferreds.  Returns a DeferredList
    # (for the caller to wait on) only when compression was started.
    self.finished = util.now()
    self.results = results
    cld = [] # deferreds for log compression
    logCompressionLimit = self.build.builder.logCompressionLimit
    for loog in self.logs:
        if not loog.isFinished():
            loog.finish()
        # if log compression is on, and it's a real LogFile,
        # HTMLLogFiles aren't files
        if logCompressionLimit is not False and \
                isinstance(loog, LogFile):
            # only compress logs that exceed the configured size limit
            if os.path.getsize(loog.getFilename()) > logCompressionLimit:
                loog_deferred = loog.compressLog()
                if loog_deferred:
                    cld.append(loog_deferred)

    # cancel any outstanding ETA-update timers
    for r in self.updates.keys():
        if self.updates[r] is not None:
            self.updates[r].cancel()
        del self.updates[r]

    # fire waitUntilFinished() deferreds; clear the list first so a
    # callback that re-subscribes does not get called twice
    watchers = self.finishedWatchers
    self.finishedWatchers = []
    for w in watchers:
        w.callback(self)
    if cld:
        return defer.DeferredList(cld)
def checkLogfiles(self):
    """Drop references to logs whose on-disk contents have been deleted."""
    surviving = []
    for logfile in self.logs:
        if logfile.hasContents():
            surviving.append(logfile)
    self.logs = surviving
def isWaitingForLocks(self):
    """Return whether the step is currently blocked waiting on locks."""
    return self.waitingForLocks
def setWaitingForLocks(self, waiting):
    """Record whether the step is blocked waiting on locks."""
    self.waitingForLocks = waiting
def __getstate__(self):
    """Return the picklable state: transient attributes (parent link,
    watcher lists, pending timers, progress) are stripped here and
    re-created by __setstate__ / our parent on load."""
    d = styles.Versioned.__getstate__(self)
    del d['build'] # filled in when loading
    # 'in' instead of the deprecated dict.has_key()
    if 'progress' in d:
        del d['progress']
    del d['watchers']
    del d['finishedWatchers']
    del d['updates']
    return d
def __setstate__(self, d):
    # Restore from pickle; the transient attributes stripped by
    # __getstate__ are re-created empty here.
    styles.Versioned.__setstate__(self, d)
    # self.build must be filled in by our parent

    # point the logs to this object
    for loog in self.logs:
        loog.step = self
    self.watchers = []
    self.finishedWatchers = []
    self.updates = {}
def upgradeToVersion1(self):
    """Pickle upgrade: ensure the .urls dict exists."""
    if not hasattr(self, "urls"):
        self.urls = {}
    # flag the pickle as modified so it gets rewritten to disk
    self.wasUpgraded = True
def upgradeToVersion2(self):
    """Pickle upgrade: ensure the .statistics dict exists."""
    if not hasattr(self, "statistics"):
        self.statistics = {}
    # flag the pickle as modified so it gets rewritten to disk
    self.wasUpgraded = True
def upgradeToVersion3(self):
    """Pickle upgrade: ensure .step_number exists (defaults to 0)."""
    if not hasattr(self, "step_number"):
        self.step_number = 0
    # flag the pickle as modified so it gets rewritten to disk
    self.wasUpgraded = True
def asDict(self):
    # Serialize this step's status into a plain dict, for the JSON/web
    # status interfaces.
    result = {}
    # Constant
    result['name'] = self.getName()

    # Transient
    result['text'] = self.getText()
    result['results'] = self.getResults()
    result['isStarted'] = self.isStarted()
    result['isFinished'] = self.isFinished()
    result['statistics'] = self.statistics
    result['times'] = self.getTimes()
    result['expectations'] = self.getExpectations()
    result['eta'] = self.getETA()
    result['urls'] = self.getURLs()
    result['step_number'] = self.step_number
    # TODO(maruel): Move that to a sub-url or just publish the log_url
    # instead.
    #result['logs'] = self.getLogs()
    return result
class BuildStatus(styles.Versioned):
    """Status of one build: its source stamp, steps, results and timing.

    Instances are pickled to disk (one file per build number, in the
    builder's basedir) once the build finishes.

    Fixes relative to the previous revision:
      - saveYourself() explicitly closes the pickle file before renaming
        (an open handle makes os.rename fail on win32, and relying on
        refcounting to close it is CPython-specific);
      - stepStarted() binds 'receiver' in the unsubscribe lambda at
        definition time instead of capturing it late.
    """

    implements(interfaces.IBuildStatus, interfaces.IStatusEvent)

    persistenceVersion = 3
    persistenceForgets = ( 'wasUpgraded', )

    source = None
    reason = None
    changes = []
    blamelist = []
    progress = None
    started = None
    finished = None
    currentStep = None
    text = []
    results = None
    slavename = "???"

    # these lists/dicts are defined here so that unserialized instances have
    # (empty) values. They are set in __init__ to new objects to make sure
    # each instance gets its own copy.
    watchers = []
    updates = {}
    finishedWatchers = []
    testResults = {}

    def __init__(self, parent, number):
        """
        @type  parent: L{BuilderStatus}
        @type  number: int
        """
        assert interfaces.IBuilderStatus(parent)
        self.builder = parent
        self.number = number
        self.watchers = []
        self.updates = {}
        self.finishedWatchers = []
        self.steps = []
        self.testResults = {}
        self.properties = Properties()

    def __repr__(self):
        return "<%s #%s>" % (self.__class__.__name__, self.number)

    # IBuildStatus

    def getBuilder(self):
        """
        @rtype: L{BuilderStatus}
        """
        return self.builder

    def getProperty(self, propname):
        """Return the named build property (raises if missing)."""
        return self.properties[propname]

    def getProperties(self):
        """Return the full Properties object."""
        return self.properties

    def getNumber(self):
        return self.number

    def getPreviousBuild(self):
        """Return the previous build's status, or None for build #0."""
        if self.number == 0:
            return None
        return self.builder.getBuild(self.number-1)

    def getSourceStamp(self, absolute=False):
        # NOTE(review): assumes Properties supports has_key() -- confirm
        # against buildbot.process.properties before modernizing this.
        if not absolute or not self.properties.has_key('got_revision'):
            return self.source
        return self.source.getAbsoluteSourceStamp(self.properties['got_revision'])

    def getReason(self):
        return self.reason

    def getChanges(self):
        return self.changes

    def getResponsibleUsers(self):
        return self.blamelist

    def getInterestedUsers(self):
        # TODO: the Builder should add others: sheriffs, domain-owners
        return self.blamelist + self.properties.getProperty('owners', [])

    def getSteps(self):
        """Return a list of IBuildStepStatus objects. For invariant builds
        (those which always use the same set of Steps), this should be the
        complete list, however some of the steps may not have started yet
        (step.getTimes()[0] will be None). For variant builds, this may not
        be complete (asking again later may give you more of them)."""
        return self.steps

    def getTimes(self):
        """Return (start, finish) timestamps; finish is None while running."""
        return (self.started, self.finished)

    _sentinel = [] # used as a sentinel to indicate unspecified initial_value
    def getSummaryStatistic(self, name, summary_fn, initial_value=_sentinel):
        """Summarize the named statistic over all steps in which it
        exists, using summary_fn and initial_value to combine multiple
        results into a single result. This translates to a call to Python's
        X{reduce}::
            return reduce(summary_fn, step_stats_list, initial_value)
        """
        step_stats_list = [
                st.getStatistic(name)
                for st in self.steps
                if st.hasStatistic(name) ]
        # the sentinel lets callers pass initial_value=None meaningfully
        if initial_value is self._sentinel:
            return reduce(summary_fn, step_stats_list)
        else:
            return reduce(summary_fn, step_stats_list, initial_value)

    def isFinished(self):
        return (self.finished is not None)

    def waitUntilFinished(self):
        """Return a Deferred that fires (with self) when the build ends."""
        if self.finished:
            d = defer.succeed(self)
        else:
            d = defer.Deferred()
            self.finishedWatchers.append(d)
        return d

    # while the build is running, the following methods make sense.
    # Afterwards they return None

    def getETA(self):
        """Seconds remaining, or None when finished/unknown."""
        if self.finished is not None:
            return None
        if not self.progress:
            return None
        eta = self.progress.eta()
        if eta is None:
            return None
        return eta - util.now()

    def getCurrentStep(self):
        return self.currentStep

    # Once you know the build has finished, the following methods are legal.
    # Before the build has finished, they all return None.

    def getText(self):
        """Build text plus every step's text2 contribution."""
        text = []
        text.extend(self.text)
        for s in self.steps:
            text.extend(s.text2)
        return text

    def getResults(self):
        return self.results

    def getSlavename(self):
        return self.slavename

    def getTestResults(self):
        return self.testResults

    def getTestResultsOrd(self):
        """Test results as a list, ordered by sorted result name."""
        trs = self.testResults.keys()
        trs.sort()
        ret = [ self.testResults[t] for t in trs]
        return ret

    def getLogs(self):
        # TODO: steps should contribute significant logs instead of this
        # hack, which returns every log from every step. The logs should get
        # names like "compile" and "test" instead of "compile.output"
        logs = []
        for s in self.steps:
            for log in s.getLogs():
                logs.append(log)
        return logs

    # subscription interface

    def subscribe(self, receiver, updateInterval=None):
        # will receive stepStarted and stepFinished messages
        # and maybe buildETAUpdate
        self.watchers.append(receiver)
        if updateInterval is not None:
            self.sendETAUpdate(receiver, updateInterval)

    def sendETAUpdate(self, receiver, updateInterval):
        # push one buildETAUpdate and reschedule; see the step-level
        # sendETAUpdate for the None-first ordering rationale
        self.updates[receiver] = None
        ETA = self.getETA()
        if ETA is not None:
            receiver.buildETAUpdate(self, self.getETA())
        # they might have unsubscribed during buildETAUpdate
        if receiver in self.watchers:
            self.updates[receiver] = reactor.callLater(updateInterval,
                                                       self.sendETAUpdate,
                                                       receiver,
                                                       updateInterval)

    def unsubscribe(self, receiver):
        if receiver in self.watchers:
            self.watchers.remove(receiver)
        if receiver in self.updates:
            if self.updates[receiver] is not None:
                self.updates[receiver].cancel()
            del self.updates[receiver]

    # methods for the base.Build to invoke

    def addStepWithName(self, name):
        """The Build is setting up, and has added a new BuildStep to its
        list. Create a BuildStepStatus object to which it can send status
        updates."""
        s = BuildStepStatus(self, len(self.steps))
        s.setName(name)
        self.steps.append(s)
        return s

    def setProperty(self, propname, value, source, runtime=True):
        self.properties.setProperty(propname, value, source, runtime)

    def addTestResult(self, result):
        self.testResults[result.getName()] = result

    def setSourceStamp(self, sourceStamp):
        self.source = sourceStamp
        self.changes = self.source.changes

    def setReason(self, reason):
        self.reason = reason
    def setBlamelist(self, blamelist):
        self.blamelist = blamelist
    def setProgress(self, progress):
        self.progress = progress

    def buildStarted(self, build):
        """The Build has been set up and is about to be started. It can now
        be safely queried, so it is time to announce the new build."""
        self.started = util.now()
        # now that we're ready to report status, let the BuilderStatus tell
        # the world about us
        self.builder.buildStarted(self)

    def setSlavename(self, slavename):
        self.slavename = slavename

    def setText(self, text):
        assert isinstance(text, (list, tuple))
        self.text = text
    def setResults(self, results):
        self.results = results

    def buildFinished(self):
        # record completion, cancel pending ETA timers, fire deferreds
        self.currentStep = None
        self.finished = util.now()

        for r in self.updates.keys():
            if self.updates[r] is not None:
                self.updates[r].cancel()
            del self.updates[r]

        watchers = self.finishedWatchers
        self.finishedWatchers = []
        for w in watchers:
            w.callback(self)

    # methods called by our BuildStepStatus children

    def stepStarted(self, step):
        self.currentStep = step
        for w in self.watchers:
            receiver = w.stepStarted(self, step)
            if receiver:
                # the watcher may return (receiver, interval) or receiver
                if type(receiver) == type(()):
                    step.subscribe(receiver[0], receiver[1])
                else:
                    step.subscribe(receiver)
                d = step.waitUntilFinished()
                # bind receiver now, not at callback time (late-binding fix)
                d.addCallback(lambda step, r=receiver: step.unsubscribe(r))

        step.waitUntilFinished().addCallback(self._stepFinished)

    def _stepFinished(self, step):
        results = step.getResults()
        for w in self.watchers:
            w.stepFinished(self, step, results)

    # methods called by our BuilderStatus parent

    def pruneSteps(self):
        # this build is very old: remove the build steps too
        self.steps = []

    # persistence stuff

    def generateLogfileName(self, stepname, logname):
        """Return a filename (relative to the Builder's base directory) where
        the logfile's contents can be stored uniquely.

        The base filename is made by combining our build number, the Step's
        name, and the log's name, then removing unsuitable characters. The
        filename is then made unique by appending _0, _1, etc, until it does
        not collide with any other logfile.

        These files are kept in the Builder's basedir (rather than a
        per-Build subdirectory) because that makes cleanup easier: cron and
        find will help get rid of the old logs, but the empty directories are
        more of a hassle to remove."""
        starting_filename = "%d-log-%s-%s" % (self.number, stepname, logname)
        starting_filename = re.sub(r'[^\w\.\-]', '_', starting_filename)
        # now make it unique
        unique_counter = 0
        filename = starting_filename
        while filename in [l.filename
                           for step in self.steps
                           for l in step.getLogs()
                           if l.filename]:
            filename = "%s_%d" % (starting_filename, unique_counter)
            unique_counter += 1
        return filename

    def __getstate__(self):
        d = styles.Versioned.__getstate__(self)
        # for now, a serialized Build is always "finished". We will never
        # save unfinished builds.
        if not self.finished:
            d['finished'] = True
            # TODO: push an "interrupted" step so it is clear that the build
            # was interrupted. The builder will have a 'shutdown' event, but
            # someone looking at just this build will be confused as to why
            # the last log is truncated.
        for k in 'builder', 'watchers', 'updates', 'finishedWatchers':
            if k in d: del d[k]
        return d

    def __setstate__(self, d):
        styles.Versioned.__setstate__(self, d)
        # self.builder must be filled in by our parent when loading
        for step in self.steps:
            step.build = self
        self.watchers = []
        self.updates = {}
        self.finishedWatchers = []

    def upgradeToVersion1(self):
        if hasattr(self, "sourceStamp"):
            # the old .sourceStamp attribute wasn't actually very useful
            maxChangeNumber, patch = self.sourceStamp
            changes = getattr(self, 'changes', [])
            source = sourcestamp.SourceStamp(branch=None,
                                             revision=None,
                                             patch=patch,
                                             changes=changes)
            self.source = source
            self.changes = source.changes
            del self.sourceStamp
        self.wasUpgraded = True

    def upgradeToVersion2(self):
        self.properties = {}
        self.wasUpgraded = True

    def upgradeToVersion3(self):
        # in version 3, self.properties became a Properties object
        propdict = self.properties
        self.properties = Properties()
        self.properties.update(propdict, "Upgrade from previous version")
        self.wasUpgraded = True

    def upgradeLogfiles(self):
        # upgrade any LogFiles that need it. This must occur after we've been
        # attached to our Builder, and after we know about all LogFiles of
        # all Steps (to get the filenames right).
        assert self.builder
        for s in self.steps:
            for l in s.getLogs():
                if l.filename:
                    pass # new-style, log contents are on disk
                else:
                    logfilename = self.generateLogfileName(s.name, l.name)
                    # let the logfile update its .filename pointer,
                    # transferring its contents onto disk if necessary
                    l.upgrade(logfilename)

    def checkLogfiles(self):
        # check that all logfiles exist, and remove references to any that
        # have been deleted (e.g., by purge())
        for s in self.steps:
            s.checkLogfiles()

    def saveYourself(self):
        """Pickle this build to <basedir>/<number>, atomically via a
        .tmp file."""
        filename = os.path.join(self.builder.basedir, "%d" % self.number)
        if os.path.isdir(filename):
            # leftover from 0.5.0, which stored builds in directories
            shutil.rmtree(filename, ignore_errors=True)
        tmpfilename = filename + ".tmp"
        try:
            # FIX: close the file before renaming -- refcount-based closing
            # is CPython-specific, and an open handle breaks rename on win32
            tmpfile = open(tmpfilename, "wb")
            try:
                dump(self, tmpfile, -1)
            finally:
                tmpfile.close()
            if runtime.platformType == 'win32':
                # windows cannot rename a file on top of an existing one, so
                # fall back to delete-first. There are ways this can fail and
                # lose the builder's history, so we avoid using it in the
                # general (non-windows) case
                if os.path.exists(filename):
                    os.unlink(filename)
            os.rename(tmpfilename, filename)
        except:
            log.msg("unable to save build %s-#%d" % (self.builder.name,
                                                     self.number))
            log.err()

    def asDict(self):
        # Serialize this build into a plain dict for the JSON/web status.
        result = {}
        # Constant
        result['builderName'] = self.builder.name
        result['number'] = self.getNumber()
        result['sourceStamp'] = self.getSourceStamp().asDict()
        result['reason'] = self.getReason()
        result['blame'] = self.getResponsibleUsers()

        # Transient
        result['properties'] = self.getProperties().asList()
        result['times'] = self.getTimes()
        result['text'] = self.getText()
        result['results'] = self.getResults()
        result['slave'] = self.getSlavename()
        # TODO(maruel): Add.
        #result['test_results'] = self.getTestResults()
        # TODO(maruel): Include the url? It's too heavy otherwise.
        #result['logs'] = self.getLogs()
        result['eta'] = self.getETA()
        result['steps'] = [bss.asDict() for bss in self.steps]
        if self.getCurrentStep():
            result['currentStep'] = self.getCurrentStep().asDict()
        else:
            result['currentStep'] = None
        return result
class BuilderStatus(styles.Versioned):
    """I handle status information for a single process.base.Builder object.
    That object sends status changes to me (frequently as Events), and I
    provide them on demand to the various status recipients, like the HTML
    waterfall display and the live status clients. It also sends build
    summaries to me, which I log and provide to status clients who aren't
    interested in seeing details of the individual build steps.

    I am responsible for maintaining the list of historic Events and Builds,
    pruning old ones, and loading them from / saving them to disk.

    I live in the buildbot.process.base.Builder object, in the
    .builder_status attribute.

    @type  category: string
    @ivar  category: user-defined category this builder belongs to; can be
                     used to filter on in status clients
    """

    implements(interfaces.IBuilderStatus, interfaces.IEventSource)

    persistenceVersion = 1
    persistenceForgets = ( 'wasUpgraded', )

    # these limit the amount of memory we consume, as well as the size of the
    # main Builder pickle. The Build and LogFile pickles on disk must be
    # handled separately.
    buildCacheSize = 15
    eventHorizon = 50 # forget events beyond this

    # these limit on-disk storage
    logHorizon = 40 # forget logs in steps in builds beyond this
    buildHorizon = 100 # forget builds beyond this

    category = None
    currentBigState = "offline" # or idle/waiting/interlocked/building
    basedir = None # filled in by our parent
def __init__(self, buildername, category=None):
    # Set up a fresh builder status with empty event and build state.
    self.name = buildername
    self.category = category

    self.slavenames = []
    self.events = []
    # these three hold Events, and are used to retrieve the current
    # state of the boxes.
    self.lastBuildStatus = None
    #self.currentBig = None
    #self.currentSmall = None
    self.currentBuilds = []
    self.nextBuild = None
    self.watchers = []
    # weak-value cache of recently-loaded Build pickles; the LRU list
    # holds the strong references that keep entries alive
    self.buildCache = weakref.WeakValueDictionary()
    self.buildCache_LRU = []
    self.logCompressionLimit = False # default to no compression for tests
    self.logCompressionMethod = "bz2"
    self.logMaxSize = None # No default limit
    self.logMaxTailSize = None # No tail buffering
def __getstate__(self):
    # when saving, don't record transient stuff like what builds are
    # currently running, because they won't be there when we start back
    # up. Nor do we save self.watchers, nor anything that gets set by our
    # parent like .basedir and .status
    d = styles.Versioned.__getstate__(self)
    d['watchers'] = []
    del d['buildCache']
    del d['buildCache_LRU']
    # current (unfinished) builds are persisted individually instead
    for b in self.currentBuilds:
        b.saveYourself()
        # TODO: push a 'hey, build was interrupted' event
    del d['currentBuilds']
    d.pop('pendingBuilds', None)
    del d['currentBigState']
    del d['basedir']
    del d['status']
    del d['nextBuildNumber']
    return d
def __setstate__(self, d):
    # when loading, re-initialize the transient stuff. Remember that
    # upgradeToVersion1 and such will be called after this finishes.
    styles.Versioned.__setstate__(self, d)
    self.buildCache = weakref.WeakValueDictionary()
    self.buildCache_LRU = []
    self.currentBuilds = []
    self.watchers = []
    self.slavenames = []
    # self.basedir must be filled in by our parent
    # self.status must be filled in by our parent
def reconfigFromBuildmaster(self, buildmaster):
    """Pick up cache sizing from the master's config.

    We deliberately do not keep a reference to the buildmaster itself,
    since this object gets pickled and unpickled.
    """
    cache_size = buildmaster.buildCacheSize
    if cache_size is not None:
        self.buildCacheSize = cache_size
def upgradeToVersion1(self):
    # Pickle upgrade: single .slavename became the .slavenames list, and
    # a stale nextBuildNumber is dropped (determineNextBuildNumber will
    # recompute it from the on-disk builds).
    if hasattr(self, 'slavename'):
        self.slavenames = [self.slavename]
        del self.slavename
    if hasattr(self, 'nextBuildNumber'):
        del self.nextBuildNumber # determineNextBuildNumber chooses this
    # flag the pickle as modified so it gets rewritten to disk
    self.wasUpgraded = True
def determineNextBuildNumber(self):
    """Scan our directory of saved BuildStatus instances to determine
    what our self.nextBuildNumber should be. Set it one larger than the
    highest-numbered build we discover. This is called by the top-level
    Status object shortly after we are created or loaded from disk.
    """
    numbers = [int(entry)
               for entry in os.listdir(self.basedir)
               if re.match(r"^\d+$", entry)]
    if numbers:
        self.nextBuildNumber = max(numbers) + 1
    else:
        self.nextBuildNumber = 0
def setLogCompressionLimit(self, lowerLimit):
    """Set the size (bytes) above which step logs get compressed."""
    self.logCompressionLimit = lowerLimit
def setLogCompressionMethod(self, method):
    """Choose the log compression algorithm; only bz2 and gz are known."""
    assert method in ("bz2", "gz")
    self.logCompressionMethod = method
def setLogMaxSize(self, upperLimit):
    """Set the maximum size (bytes) a step log may grow to."""
    self.logMaxSize = upperLimit
def setLogMaxTailSize(self, tailSize):
    """Set how many bytes of log tail to keep once logMaxSize is hit."""
    self.logMaxTailSize = tailSize
def saveYourself(self):
    """Pickle this BuilderStatus to <basedir>/builder, atomically via a
    .tmp file; unfinished current builds are saved individually first.

    Fixes: 'if not b.isFinished:' tested the bound method object (always
    truthy), so interrupted builds were never saved -- it now calls
    isFinished().  The pickle file is also closed explicitly before the
    rename (required on win32; refcount-closing is CPython-specific).
    """
    for b in self.currentBuilds:
        if not b.isFinished():
            # interrupted build, need to save it anyway.
            # BuildStatus.saveYourself will mark it as interrupted.
            b.saveYourself()
    filename = os.path.join(self.basedir, "builder")
    tmpfilename = filename + ".tmp"
    try:
        tmpfile = open(tmpfilename, "wb")
        try:
            dump(self, tmpfile, -1)
        finally:
            tmpfile.close()
        if runtime.platformType == 'win32':
            # windows cannot rename a file on top of an existing one
            if os.path.exists(filename):
                os.unlink(filename)
        os.rename(tmpfilename, filename)
    except:
        log.msg("unable to save builder %s" % self.name)
        log.err()
def makeBuildFilename(self, number):
    """Return the path of build *number*'s pickle inside our basedir."""
    return os.path.join(self.basedir, "%d" % number)
def touchBuildCache(self, build):
    # Insert or refresh 'build' in the weak-value cache and move it to
    # the most-recently-used end of the LRU list, trimming the list to
    # buildCacheSize entries.  The strong references held by the LRU
    # list are what keep the weak cache entries alive.
    self.buildCache[build.number] = build
    if build in self.buildCache_LRU:
        self.buildCache_LRU.remove(build)
    self.buildCache_LRU = self.buildCache_LRU[-(self.buildCacheSize-1):] + [ build ]
    return build
def getBuildByNumber(self, number):
    # Look up a build: current builds first, then the in-memory cache,
    # then the on-disk pickle (upgrading and re-saving it if needed).
    # Raises IndexError when the pickle is missing or corrupt.

    # first look in currentBuilds
    for b in self.currentBuilds:
        if b.number == number:
            return self.touchBuildCache(b)

    # then in the buildCache
    if number in self.buildCache:
        return self.touchBuildCache(self.buildCache[number])

    # then fall back to loading it from disk
    filename = self.makeBuildFilename(number)
    try:
        log.msg("Loading builder %s's build %d from on-disk pickle"
                % (self.name, number))
        build = load(open(filename, "rb"))
        build.builder = self

        # (bug #1068) if we need to upgrade, we probably need to rewrite
        # this pickle, too. We determine this by looking at the list of
        # Versioned objects that have been unpickled, and (after doUpgrade)
        # checking to see if any of them set wasUpgraded. The Versioneds'
        # upgradeToVersionNN methods all set this.
        versioneds = styles.versionedsToUpgrade
        styles.doUpgrade()
        if True in [ hasattr(o, 'wasUpgraded') for o in versioneds.values() ]:
            log.msg("re-writing upgraded build pickle")
            build.saveYourself()

        # handle LogFiles from after 0.5.0 and before 0.6.5
        build.upgradeLogfiles()
        # check that logfiles exist
        build.checkLogfiles()
        return self.touchBuildCache(build)
    except IOError:
        raise IndexError("no such build %d" % number)
    except EOFError:
        raise IndexError("corrupted build pickle %d" % number)
def prune(self, events_only=False):
    # Discard old status data: always trim the in-memory event list to
    # eventHorizon; unless events_only, also delete on-disk build
    # pickles older than buildHorizon and their logfiles older than
    # logHorizon (both horizons measured back from nextBuildNumber).

    # begin by pruning our own events
    self.events = self.events[-self.eventHorizon:]

    if events_only:
        return

    # free dropped Build objects so their weakref cache entries clear
    gc.collect()

    # get the horizons straight
    if self.buildHorizon is not None:
        earliest_build = self.nextBuildNumber - self.buildHorizon
    else:
        earliest_build = 0

    if self.logHorizon is not None:
        earliest_log = self.nextBuildNumber - self.logHorizon
    else:
        earliest_log = 0

    # logs can never outlive their build
    if earliest_log < earliest_build:
        earliest_log = earliest_build

    if earliest_build == 0:
        return

    # skim the directory and delete anything that shouldn't be there anymore
    build_re = re.compile(r"^([0-9]+)$")
    build_log_re = re.compile(r"^([0-9]+)-.*$")
    # if the directory doesn't exist, bail out here
    if not os.path.exists(self.basedir):
        return

    for filename in os.listdir(self.basedir):
        num = None
        mo = build_re.match(filename)
        is_logfile = False
        if mo:
            num = int(mo.group(1))
        else:
            mo = build_log_re.match(filename)
            if mo:
                num = int(mo.group(1))
                is_logfile = True

        if num is None: continue
        # never delete a build that is still cached (possibly in use)
        if num in self.buildCache: continue

        if (is_logfile and num < earliest_log) or num < earliest_build:
            pathname = os.path.join(self.basedir, filename)
            log.msg("pruning '%s'" % pathname)
            try: os.unlink(pathname)
            except OSError: pass
def getName(self):
    """Return this builder's name."""
    return self.name
def getState(self):
    """Return (bigState, currentBuilds)."""
    return (self.currentBigState, self.currentBuilds)
def getSlaves(self):
    """Return SlaveStatus objects for every attached slave name."""
    lookup = self.status.getSlave
    return [lookup(name) for name in self.slavenames]
def getPendingBuilds(self):
    """Return a BuildRequestStatus for every pending request of ours."""
    db = self.status.db
    brids = db.get_pending_brids_for_builder(self.name)
    return [BuildRequestStatus(brid, self.status, db) for brid in brids]
def getCurrentBuilds(self):
    """Return the list of builds currently in progress."""
    return self.currentBuilds
def getLastFinishedBuild(self):
    """Return the most recent finished build (the latest build may still
    be running, in which case the one before it is returned)."""
    candidate = self.getBuild(-1)
    if candidate is not None and candidate.isFinished():
        return candidate
    return self.getBuild(-2)
def getCategory(self):
    """Return the user-defined category this builder belongs to."""
    return self.category
def getBuild(self, number):
    """Return the BuildStatus for the given build number (negative
    numbers count back from the most recent); None when out of range
    or not loadable."""
    if number < 0:
        number += self.nextBuildNumber
    if not (0 <= number < self.nextBuildNumber):
        return None

    try:
        return self.getBuildByNumber(number)
    except IndexError:
        return None
def getEvent(self, number):
    """Return event *number* (negative indexes count from the end), or
    None when out of range."""
    try:
        event = self.events[number]
    except IndexError:
        return None
    return event
def generateFinishedBuilds(self, branches=[],
                           num_builds=None,
                           max_buildnum=None,
                           finished_before=None,
                           max_search=200):
    # Generator yielding finished builds, newest first, filtered by the
    # given criteria; stops after num_builds yields or after examining
    # max_search builds.  NOTE(review): branches=[] is a mutable default,
    # but it is only read here, never mutated, so it is safe as-is.
    got = 0
    for Nb in itertools.count(1):
        if Nb > self.nextBuildNumber:
            break
        if Nb > max_search:
            break
        build = self.getBuild(-Nb)
        if build is None:
            continue
        if max_buildnum is not None:
            if build.getNumber() > max_buildnum:
                continue
        if not build.isFinished():
            continue
        if finished_before is not None:
            start, end = build.getTimes()
            if end >= finished_before:
                continue
        if branches:
            if build.getSourceStamp().branch not in branches:
                continue
        got += 1
        yield build
        if num_builds is not None:
            if got >= num_builds:
                return
def eventGenerator(self, branches=[], categories=[], committers=[], minTime=0):
    """This function creates a generator which will provide all of this
    Builder's status events, starting with the most recent and
    progressing backwards in time. """

    # remember the oldest-to-earliest flow here. "next" means earlier.

    # TODO: interleave build steps and self.events by timestamp.
    # TODO: um, I think we're already doing that.

    # TODO: there's probably something clever we could do here to
    # interleave two event streams (one from self.getBuild and the other
    # from self.getEvent), which would be simpler than this control flow

    eventIndex = -1
    e = self.getEvent(eventIndex)
    # walk builds newest-first, merging self.events in by timestamp
    for Nb in range(1, self.nextBuildNumber+1):
        b = self.getBuild(-Nb)
        if not b:
            # HACK: If this is the first build we are looking at, it is
            # possible it's in progress but locked before it has written a
            # pickle; in this case keep looking.
            if Nb == 1:
                continue
            break
        if b.getTimes()[0] < minTime:
            break
        if branches and not b.getSourceStamp().branch in branches:
            continue
        if categories and not b.getBuilder().getCategory() in categories:
            continue
        if committers and not [True for c in b.getChanges() if c.who in committers]:
            continue
        steps = b.getSteps()
        # steps newest-first; drain any events newer than each step first
        for Ns in range(1, len(steps)+1):
            if steps[-Ns].started:
                step_start = steps[-Ns].getTimes()[0]
                while e is not None and e.getTimes()[0] > step_start:
                    yield e
                    eventIndex -= 1
                    e = self.getEvent(eventIndex)
                yield steps[-Ns]
        yield b
    # finally drain the remaining (older) events down to minTime
    while e is not None:
        yield e
        eventIndex -= 1
        e = self.getEvent(eventIndex)
        if e and e.getTimes()[0] < minTime:
            break
1987 - def subscribe(self, receiver):
1988 # will get builderChangedState, buildStarted, buildFinished, 1989 # requestSubmitted, requestCancelled. Note that a request which is 1990 # resubmitted (due to a slave disconnect) will cause requestSubmitted 1991 # to be invoked multiple times. 1992 self.watchers.append(receiver) 1993 self.publishState(receiver) 1994 # our parent Status provides requestSubmitted and requestCancelled 1995 self.status._builder_subscribe(self.name, receiver)
1996
1997 - def unsubscribe(self, receiver):
1998 self.watchers.remove(receiver) 1999 self.status._builder_unsubscribe(self.name, receiver)
2000 2001 ## Builder interface (methods called by the Builder which feeds us) 2002
2003 - def setSlavenames(self, names):
2004 self.slavenames = names
2005
2006 - def addEvent(self, text=[]):
2007 # this adds a duration event. When it is done, the user should call 2008 # e.finish(). They can also mangle it by modifying .text 2009 e = Event() 2010 e.started = util.now() 2011 e.text = text 2012 self.events.append(e) 2013 self.prune(events_only=True) 2014 return e # they are free to mangle it further
2015
2016 - def addPointEvent(self, text=[]):
2017 # this adds a point event, one which occurs as a single atomic 2018 # instant of time. 2019 e = Event() 2020 e.started = util.now() 2021 e.finished = 0 2022 e.text = text 2023 self.events.append(e) 2024 self.prune(events_only=True) 2025 return e # for consistency, but they really shouldn't touch it
2026
2027 - def setBigState(self, state):
2028 needToUpdate = state != self.currentBigState 2029 self.currentBigState = state 2030 if needToUpdate: 2031 self.publishState()
2032
2033 - def publishState(self, target=None):
2034 state = self.currentBigState 2035 2036 if target is not None: 2037 # unicast 2038 target.builderChangedState(self.name, state) 2039 return 2040 for w in self.watchers: 2041 try: 2042 w.builderChangedState(self.name, state) 2043 except: 2044 log.msg("Exception caught publishing state to %r" % w) 2045 log.err()
2046
2047 - def newBuild(self):
2048 """The Builder has decided to start a build, but the Build object is 2049 not yet ready to report status (it has not finished creating the 2050 Steps). Create a BuildStatus object that it can use.""" 2051 number = self.nextBuildNumber 2052 self.nextBuildNumber += 1 2053 # TODO: self.saveYourself(), to make sure we don't forget about the 2054 # build number we've just allocated. This is not quite as important 2055 # as it was before we switch to determineNextBuildNumber, but I think 2056 # it may still be useful to have the new build save itself. 2057 s = BuildStatus(self, number) 2058 s.waitUntilFinished().addCallback(self._buildFinished) 2059 return s
2060 2061 # buildStarted is called by our child BuildStatus instances
2062 - def buildStarted(self, s):
2063 """Now the BuildStatus object is ready to go (it knows all of its 2064 Steps, its ETA, etc), so it is safe to notify our watchers.""" 2065 2066 assert s.builder is self # paranoia 2067 assert s.number == self.nextBuildNumber - 1 2068 assert s not in self.currentBuilds 2069 self.currentBuilds.append(s) 2070 self.touchBuildCache(s) 2071 2072 # now that the BuildStatus is prepared to answer queries, we can 2073 # announce the new build to all our watchers 2074 2075 for w in self.watchers: # TODO: maybe do this later? callLater(0)? 2076 try: 2077 receiver = w.buildStarted(self.getName(), s) 2078 if receiver: 2079 if type(receiver) == type(()): 2080 s.subscribe(receiver[0], receiver[1]) 2081 else: 2082 s.subscribe(receiver) 2083 d = s.waitUntilFinished() 2084 d.addCallback(lambda s: s.unsubscribe(receiver)) 2085 except: 2086 log.msg("Exception caught notifying %r of buildStarted event" % w) 2087 log.err()
2088
2089 - def _buildFinished(self, s):
2090 assert s in self.currentBuilds 2091 s.saveYourself() 2092 self.currentBuilds.remove(s) 2093 2094 name = self.getName() 2095 results = s.getResults() 2096 for w in self.watchers: 2097 try: 2098 w.buildFinished(name, s, results) 2099 except: 2100 log.msg("Exception caught notifying %r of buildFinished event" % w) 2101 log.err() 2102 2103 self.prune() # conserve disk
2104 2105 2106 # waterfall display (history) 2107 2108 # I want some kind of build event that holds everything about the build: 2109 # why, what changes went into it, the results of the build, itemized 2110 # test results, etc. But, I do kind of need something to be inserted in 2111 # the event log first, because intermixing step events and the larger 2112 # build event is fraught with peril. Maybe an Event-like-thing that 2113 # doesn't have a file in it but does have links. Hmm, that's exactly 2114 # what it does now. The only difference would be that this event isn't 2115 # pushed to the clients. 2116 2117 # publish to clients
2118 - def sendLastBuildStatus(self, client):
2119 #client.newLastBuildStatus(self.lastBuildStatus) 2120 pass
2122 for s in self.subscribers: 2123 self.sendCurrentActivityBig(s)
2124 - def sendCurrentActivityBig(self, client):
2125 state = self.currentBigState 2126 if state == "offline": 2127 client.currentlyOffline() 2128 elif state == "idle": 2129 client.currentlyIdle() 2130 elif state == "building": 2131 client.currentlyBuilding() 2132 else: 2133 log.msg("Hey, self.currentBigState is weird:", state)
2134 2135 2136 ## HTML display interface 2137
2138 - def getEventNumbered(self, num):
2139 # deal with dropped events, pruned events 2140 first = self.events[0].number 2141 if first + len(self.events)-1 != self.events[-1].number: 2142 log.msg(self, 2143 "lost an event somewhere: [0] is %d, [%d] is %d" % \ 2144 (self.events[0].number, 2145 len(self.events) - 1, 2146 self.events[-1].number)) 2147 for e in self.events: 2148 log.msg("e[%d]: " % e.number, e) 2149 return None 2150 offset = num - first 2151 log.msg(self, "offset", offset) 2152 try: 2153 return self.events[offset] 2154 except IndexError: 2155 return None
2156 2157 ## Persistence of Status
2158 - def loadYourOldEvents(self):
2159 if hasattr(self, "allEvents"): 2160 # first time, nothing to get from file. Note that this is only if 2161 # the Application gets .run() . If it gets .save()'ed, then the 2162 # .allEvents attribute goes away in the initial __getstate__ and 2163 # we try to load a non-existent file. 2164 return 2165 self.allEvents = self.loadFile("events", []) 2166 if self.allEvents: 2167 self.nextEventNumber = self.allEvents[-1].number + 1 2168 else: 2169 self.nextEventNumber = 0
2170 - def saveYourOldEvents(self):
2171 self.saveFile("events", self.allEvents)
2172 2173 ## clients 2174
2175 - def addClient(self, client):
2176 if client not in self.subscribers: 2177