
Source Code for Module buildbot.slave.commands

   1  # -*- test-case-name: buildbot.test.test_slavecommand -*- 
   2   
   3  import os, sys, re, signal, shutil, types, time, tarfile, tempfile 
   4  from stat import ST_CTIME, ST_MTIME, ST_SIZE 
   5  from xml.dom.minidom import parseString 
   6   
   7  from zope.interface import implements 
   8  from twisted.internet.protocol import ProcessProtocol 
   9  from twisted.internet import reactor, defer, task 
  10  from twisted.python import log, failure, runtime 
  11  from twisted.python.procutils import which 
  12   
  13  from buildbot.slave.interfaces import ISlaveCommand 
  14  from buildbot.slave.registry import registerSlaveCommand 
  15  from buildbot.util import to_text, remove_userpassword 
  16   
  17  # this used to be a CVS $-style "Revision" auto-updated keyword, but since I 
  18  # moved to Darcs as the primary repository, this is updated manually each 
  19  # time this file is changed. The last cvs_ver that was here was 1.51 . 
  20  command_version = "2.9" 
  21   
  22  # version history: 
  23  #  >=1.17: commands are interruptable 
  24  #  >=1.28: Arch understands 'revision', added Bazaar 
  25  #  >=1.33: Source classes understand 'retry' 
  26  #  >=1.39: Source classes correctly handle changes in branch (except Git) 
  27  #          Darcs accepts 'revision' (now all do but Git) (well, and P4Sync) 
  28  #          Arch/Baz should accept 'build-config' 
  29  #  >=1.51: (release 0.7.3) 
  30  #  >= 2.1: SlaveShellCommand now accepts 'initial_stdin', 'keep_stdin_open', 
  31  #          and 'logfiles'. It now sends 'log' messages in addition to 
  32  #          stdout/stdin/header/rc. It acquired writeStdin/closeStdin methods, 
  33  #          but these are not remotely callable yet. 
  34  #          (not externally visible: ShellCommandPP has writeStdin/closeStdin. 
  35  #          ShellCommand accepts new arguments (logfiles=, initialStdin=, 
  36  #          keepStdinOpen=) and no longer accepts stdin=) 
  37  #          (release 0.7.4) 
  38  #  >= 2.2: added monotone, uploadFile, and downloadFile (release 0.7.5) 
  39  #  >= 2.3: added bzr (release 0.7.6) 
  40  #  >= 2.4: Git understands 'revision' and branches 
  41  #  >= 2.5: workaround added for remote 'hg clone --rev REV' when hg<0.9.2 
  42  #  >= 2.6: added uploadDirectory 
  43  #  >= 2.7: added usePTY option to SlaveShellCommand 
  44  #  >= 2.8: added username and password args to SVN class 
  45   
  46 -class CommandInterrupted(Exception):
  47      pass
  48 -class TimeoutError(Exception):
  49      pass
50
  51 -class Obfuscated:
  52      """An obfuscated string in a command"""
  53 -    def __init__(self, real, fake):
  54          self.real = real
  55          self.fake = fake
  56  
  57 -    def __str__(self):
  58          return self.fake
  59  
  60 -    def __repr__(self):
  61          return `self.fake`
  62  
  63 -    def get_real(command):
  64          rv = command
  65          if type(command) == types.ListType:
  66              rv = []
  67              for elt in command:
  68                  if isinstance(elt, Obfuscated):
  69                      rv.append(elt.real)
  70                  else:
  71                      rv.append(to_text(elt))
  72          return rv
  73      get_real = staticmethod(get_real)
  74  
  75 -    def get_fake(command):
  76          rv = command
  77          if type(command) == types.ListType:
  78              rv = []
  79              for elt in command:
  80                  if isinstance(elt, Obfuscated):
  81                      rv.append(elt.fake)
  82                  else:
  83                      rv.append(to_text(elt))
  84          return rv
  85      get_fake = staticmethod(get_fake)
86
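# --- Illustrative sketch (not part of the original module) -----------------
# How an argv containing a secret might be wrapped so that the real value is
# used to spawn the process while logs and status messages only ever see the
# fake value. The svn arguments below are hypothetical sample data.
def _example_obfuscated_argv():
    argv = ["svn", "co", "--password", Obfuscated("s3cr3t", "********"),
            "http://svn.example.org/trunk"]
    real_argv = Obfuscated.get_real(argv)  # [..., 's3cr3t', ...] - executed
    fake_argv = Obfuscated.get_fake(argv)  # [..., '********', ...] - logged
    return real_argv, fake_argv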
  87 -class AbandonChain(Exception):
  88      """A series of chained steps can raise this exception to indicate that
  89      one of the intermediate ShellCommands has failed, such that there is no
  90      point in running the remainder. 'rc' should be the non-zero exit code of
  91      the failing ShellCommand."""
  92  
  93 -    def __repr__(self):
  94          return "<AbandonChain rc=%s>" % self.args[0]
95
96 -def getCommand(name):
  97      possibles = which(name)
  98      if not possibles:
  99          raise RuntimeError("Couldn't find executable for '%s'" % name)
 100      return possibles[0]
101
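# Illustrative sketch (not part of the original module): getCommand() is how
# the VC classes below locate their executable on PATH; it raises
# RuntimeError when the tool cannot be found. "patch" is just a sample name.
def _example_get_command():
    patch_bin = getCommand("patch")
    return [patch_bin, "-p1", "--forward"]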
102 -def rmdirRecursive(dir):
103 """This is a replacement for shutil.rmtree that works better under 104 windows. Thanks to Bear at the OSAF for the code.""" 105 if not os.path.exists(dir): 106 return 107 108 if os.path.islink(dir): 109 os.remove(dir) 110 return 111 112 # Verify the directory is read/write/execute for the current user 113 os.chmod(dir, 0700) 114 115 for name in os.listdir(dir): 116 full_name = os.path.join(dir, name) 117 # on Windows, if we don't have write permission we can't remove 118 # the file/directory either, so turn that on 119 if os.name == 'nt': 120 if not os.access(full_name, os.W_OK): 121 # I think this is now redundant, but I don't have an NT 122 # machine to test on, so I'm going to leave it in place 123 # -warner 124 os.chmod(full_name, 0600) 125 126 if os.path.isdir(full_name): 127 rmdirRecursive(full_name) 128 else: 129 if os.path.isfile(full_name): 130 os.chmod(full_name, 0700) 131 os.remove(full_name) 132 os.rmdir(dir)
133
 134 -class ShellCommandPP(ProcessProtocol):
 135      debug = False
 136  
 137 -    def __init__(self, command):
 138          self.command = command
 139          self.pending_stdin = ""
 140          self.stdin_finished = False
 141  
 142 -    def writeStdin(self, data):
 143          assert not self.stdin_finished
 144          if self.connected:
 145              self.transport.write(data)
 146          else:
 147              self.pending_stdin += data
 148  
 149 -    def closeStdin(self):
 150          if self.connected:
 151              if self.debug: log.msg(" closing stdin")
 152              self.transport.closeStdin()
 153          self.stdin_finished = True
 154  
 155 -    def connectionMade(self):
 156          if self.debug:
 157              log.msg("ShellCommandPP.connectionMade")
 158          if not self.command.process:
 159              if self.debug:
 160                  log.msg(" assigning self.command.process: %s" %
 161                          (self.transport,))
 162              self.command.process = self.transport
 163  
 164          # TODO: maybe we shouldn't close stdin when using a PTY. I can't test
 165          # this yet, recent debian glibc has a bug which causes thread-using
 166          # test cases to SIGHUP trial, and the workaround is to either run
 167          # the whole test with /bin/sh -c " ".join(argv) (way gross) or to
 168          # not use a PTY. Once the bug is fixed, I'll be able to test what
 169          # happens when you close stdin on a pty. My concern is that it will
 170          # SIGHUP the child (since we are, in a sense, hanging up on them).
 171          # But it may well be that keeping stdout open prevents the SIGHUP
 172          # from being sent.
 173          #if not self.command.usePTY:
 174  
 175          if self.pending_stdin:
 176              if self.debug: log.msg(" writing to stdin")
 177              self.transport.write(self.pending_stdin)
 178          if self.stdin_finished:
 179              if self.debug: log.msg(" closing stdin")
 180              self.transport.closeStdin()
 181  
 182 -    def outReceived(self, data):
 183          if self.debug:
 184              log.msg("ShellCommandPP.outReceived")
 185          self.command.addStdout(data)
 186  
 187 -    def errReceived(self, data):
 188          if self.debug:
 189              log.msg("ShellCommandPP.errReceived")
 190          self.command.addStderr(data)
 191  
 192 -    def processEnded(self, status_object):
 193          if self.debug:
 194              log.msg("ShellCommandPP.processEnded", status_object)
 195          # status_object is a Failure wrapped around an
 196          # error.ProcessTerminated or an error.ProcessDone.
 197          # requires twisted >= 1.0.4 to overcome a bug in process.py
 198          sig = status_object.value.signal
 199          rc = status_object.value.exitCode
 200          self.command.finished(sig, rc)
201
202 -class LogFileWatcher:
203 POLL_INTERVAL = 2 204
205 - def __init__(self, command, name, logfile, follow=False):
206 self.command = command 207 self.name = name 208 self.logfile = logfile 209 210 log.msg("LogFileWatcher created to watch %s" % logfile) 211 # we are created before the ShellCommand starts. If the logfile we're 212 # supposed to be watching already exists, record its size and 213 # ctime/mtime so we can tell when it starts to change. 214 self.old_logfile_stats = self.statFile() 215 self.started = False 216 217 # follow the file, only sending back lines 218 # added since we started watching 219 self.follow = follow 220 221 # every 2 seconds we check on the file again 222 self.poller = task.LoopingCall(self.poll)
223
224 - def start(self):
225 self.poller.start(self.POLL_INTERVAL).addErrback(self._cleanupPoll)
226
227 - def _cleanupPoll(self, err):
228 log.err(err, msg="Polling error") 229 self.poller = None
230
231 - def stop(self):
232 self.poll() 233 if self.poller is not None: 234 self.poller.stop() 235 if self.started: 236 self.f.close()
237
238 - def statFile(self):
239 if os.path.exists(self.logfile): 240 s = os.stat(self.logfile) 241 return (s[ST_CTIME], s[ST_MTIME], s[ST_SIZE]) 242 return None
243
244 - def poll(self):
245 if not self.started: 246 s = self.statFile() 247 if s == self.old_logfile_stats: 248 return # not started yet 249 if not s: 250 # the file was there, but now it's deleted. Forget about the 251 # initial state, clearly the process has deleted the logfile 252 # in preparation for creating a new one. 253 self.old_logfile_stats = None 254 return # no file to work with 255 self.f = open(self.logfile, "rb") 256 # if we only want new lines, seek to 257 # where we stat'd so we only find new 258 # lines 259 if self.follow: 260 self.f.seek(s[2], 0) 261 self.started = True 262 self.f.seek(self.f.tell(), 0) 263 while True: 264 data = self.f.read(10000) 265 if not data: 266 return 267 self.command.addLogfile(self.name, data)
268 269
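# Illustrative sketch (not part of the original module): LogFileWatcher only
# requires its 'command' argument to provide addLogfile(name, data), so a
# small stub is enough to see the polling behaviour outside of a real build.
class _ExampleLogConsumer:
    def addLogfile(self, name, data):
        log.msg("log %r grew by %d bytes" % (name, len(data)))

def _example_watch_twistd_log(workdir):
    # follow=True means only lines added after the watcher starts are sent
    watcher = LogFileWatcher(_ExampleLogConsumer(), "twistd",
                             os.path.join(workdir, "twistd.log"), follow=True)
    watcher.start()   # polls every POLL_INTERVAL seconds until .stop()
    return watcher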
270 -class ShellCommand:
 271      # This is a helper class, used by SlaveCommands to run programs in a
 272      # child shell.
 273  
 274      notreally = False
 275      BACKUP_TIMEOUT = 5
 276      KILL = "KILL"
 277      CHUNK_LIMIT = 128*1024
 278  
 279      # For sending elapsed time:
 280      startTime = None
 281      elapsedTime = None
 282      # I wish we had easy access to CLOCK_MONOTONIC in Python:
 283      # http://www.opengroup.org/onlinepubs/000095399/functions/clock_getres.html
 284      # Then changes to the system clock during a run wouldn't affect the "elapsed
 285      # time" results.
 286  
 287 -    def __init__(self, builder, command,
 288                   workdir, environ=None,
 289                   sendStdout=True, sendStderr=True, sendRC=True,
 290                   timeout=None, maxTime=None, initialStdin=None,
 291                   keepStdinOpen=False, keepStdout=False, keepStderr=False,
 292                   logEnviron=True, logfiles={}, usePTY="slave-config"):
293 """ 294 295 @param keepStdout: if True, we keep a copy of all the stdout text 296 that we've seen. This copy is available in 297 self.stdout, which can be read after the command 298 has finished. 299 @param keepStderr: same, for stderr 300 301 @param usePTY: "slave-config" -> use the SlaveBuilder's usePTY; 302 otherwise, true to use a PTY, false to not use a PTY. 303 """ 304 305 self.builder = builder 306 self.command = Obfuscated.get_real(command) 307 self.fake_command = Obfuscated.get_fake(command) 308 self.sendStdout = sendStdout 309 self.sendStderr = sendStderr 310 self.sendRC = sendRC 311 self.logfiles = logfiles 312 self.workdir = workdir 313 if not os.path.exists(workdir): 314 os.makedirs(workdir) 315 self.environ = os.environ.copy() 316 if environ: 317 if environ.has_key('PYTHONPATH'): 318 ppath = environ['PYTHONPATH'] 319 # Need to do os.pathsep translation. We could either do that 320 # by replacing all incoming ':'s with os.pathsep, or by 321 # accepting lists. I like lists better. 322 if not isinstance(ppath, str): 323 # If it's not a string, treat it as a sequence to be 324 # turned in to a string. 325 ppath = os.pathsep.join(ppath) 326 327 if self.environ.has_key('PYTHONPATH'): 328 # special case, prepend the builder's items to the 329 # existing ones. This will break if you send over empty 330 # strings, so don't do that. 331 ppath = ppath + os.pathsep + self.environ['PYTHONPATH'] 332 333 environ['PYTHONPATH'] = ppath 334 335 self.environ.update(environ) 336 self.initialStdin = initialStdin 337 self.keepStdinOpen = keepStdinOpen 338 self.logEnviron = logEnviron 339 self.timeout = timeout 340 self.timer = None 341 self.maxTime = maxTime 342 self.maxTimer = None 343 self.keepStdout = keepStdout 344 self.keepStderr = keepStderr 345 346 347 if usePTY == "slave-config": 348 self.usePTY = self.builder.usePTY 349 else: 350 self.usePTY = usePTY 351 352 # usePTY=True is a convenience for cleaning up all children and 353 # grandchildren of a hung command. Fall back to usePTY=False on systems 354 # and in situations where ptys cause problems. PTYs are posix-only, 355 # and for .closeStdin to matter, we must use a pipe, not a PTY 356 if runtime.platformType != "posix" or initialStdin is not None: 357 if self.usePTY and usePTY != "slave-config": 358 self.sendStatus({'header': "WARNING: disabling usePTY for this command"}) 359 self.usePTY = False 360 361 self.logFileWatchers = [] 362 for name,filevalue in self.logfiles.items(): 363 filename = filevalue 364 follow = False 365 366 # check for a dictionary of options 367 # filename is required, others are optional 368 if type(filevalue) == dict: 369 filename = filevalue['filename'] 370 follow = filevalue.get('follow', False) 371 372 w = LogFileWatcher(self, name, 373 os.path.join(self.workdir, filename), 374 follow=follow) 375 self.logFileWatchers.append(w)
376
377 - def __repr__(self):
378 return "<slavecommand.ShellCommand '%s'>" % self.fake_command
379
380 - def sendStatus(self, status):
 381          self.builder.sendUpdate(status)
 382  
383 - def start(self):
384 # return a Deferred which fires (with the exit code) when the command 385 # completes 386 if self.keepStdout: 387 self.stdout = "" 388 if self.keepStderr: 389 self.stderr = "" 390 self.deferred = defer.Deferred() 391 try: 392 self._startCommand() 393 except: 394 log.msg("error in ShellCommand._startCommand") 395 log.err() 396 # pretend it was a shell error 397 self.deferred.errback(AbandonChain(-1)) 398 return self.deferred
399
400 - def _startCommand(self):
401 # ensure workdir exists 402 if not os.path.isdir(self.workdir): 403 os.makedirs(self.workdir) 404 log.msg("ShellCommand._startCommand") 405 if self.notreally: 406 self.sendStatus({'header': "command '%s' in dir %s" % \ 407 (self.fake_command, self.workdir)}) 408 self.sendStatus({'header': "(not really)\n"}) 409 self.finished(None, 0) 410 return 411 412 self.pp = ShellCommandPP(self) 413 414 if type(self.command) in types.StringTypes: 415 if runtime.platformType == 'win32': 416 argv = os.environ['COMSPEC'].split() # allow %COMSPEC% to have args 417 if '/c' not in argv: argv += ['/c'] 418 argv += [self.command] 419 else: 420 # for posix, use /bin/sh. for other non-posix, well, doesn't 421 # hurt to try 422 argv = ['/bin/sh', '-c', self.command] 423 display = self.fake_command 424 else: 425 if runtime.platformType == 'win32' and not self.command[0].lower().endswith(".exe"): 426 argv = os.environ['COMSPEC'].split() # allow %COMSPEC% to have args 427 if '/c' not in argv: argv += ['/c'] 428 argv += list(self.command) 429 else: 430 argv = self.command 431 display = " ".join(self.fake_command) 432 433 # $PWD usually indicates the current directory; spawnProcess may not 434 # update this value, though, so we set it explicitly here. This causes 435 # weird problems (bug #456) on msys, though.. 436 if not self.environ.get('MACHTYPE', None) == 'i686-pc-msys': 437 self.environ['PWD'] = os.path.abspath(self.workdir) 438 439 # self.stdin is handled in ShellCommandPP.connectionMade 440 441 # first header line is the command in plain text, argv joined with 442 # spaces. You should be able to cut-and-paste this into a shell to 443 # obtain the same results. If there are spaces in the arguments, too 444 # bad. 445 log.msg(" " + display) 446 self.sendStatus({'header': display+"\n"}) 447 448 # then comes the secondary information 449 msg = " in dir %s" % (self.workdir,) 450 if self.timeout: 451 msg += " (timeout %d secs)" % (self.timeout,) 452 log.msg(" " + msg) 453 self.sendStatus({'header': msg+"\n"}) 454 455 msg = " watching logfiles %s" % (self.logfiles,) 456 log.msg(" " + msg) 457 self.sendStatus({'header': msg+"\n"}) 458 459 # then the obfuscated command array for resolving unambiguity 460 msg = " argv: %s" % (self.fake_command,) 461 log.msg(" " + msg) 462 self.sendStatus({'header': msg+"\n"}) 463 464 # then the environment, since it sometimes causes problems 465 if self.logEnviron: 466 msg = " environment:\n" 467 env_names = self.environ.keys() 468 env_names.sort() 469 for name in env_names: 470 msg += " %s=%s\n" % (name, self.environ[name]) 471 log.msg(" environment: %s" % (self.environ,)) 472 self.sendStatus({'header': msg}) 473 474 if self.initialStdin: 475 msg = " writing %d bytes to stdin" % len(self.initialStdin) 476 log.msg(" " + msg) 477 self.sendStatus({'header': msg+"\n"}) 478 479 if self.keepStdinOpen: 480 msg = " leaving stdin open" 481 else: 482 msg = " closing stdin" 483 log.msg(" " + msg) 484 self.sendStatus({'header': msg+"\n"}) 485 486 msg = " using PTY: %s" % bool(self.usePTY) 487 log.msg(" " + msg) 488 self.sendStatus({'header': msg+"\n"}) 489 490 # this will be buffered until connectionMade is called 491 if self.initialStdin: 492 self.pp.writeStdin(self.initialStdin) 493 if not self.keepStdinOpen: 494 self.pp.closeStdin() 495 496 # win32eventreactor's spawnProcess (under twisted <= 2.0.1) returns 497 # None, as opposed to all the posixbase-derived reactors (which 498 # return the new Process object). This is a nuisance. 
We can make up 499 # for it by having the ProcessProtocol give us their .transport 500 # attribute after they get one. I'd prefer to get it from 501 # spawnProcess because I'm concerned about returning from this method 502 # without having a valid self.process to work with. (if kill() were 503 # called right after we return, but somehow before connectionMade 504 # were called, then kill() would blow up). 505 self.process = None 506 self.startTime = time.time() 507 508 p = reactor.spawnProcess(self.pp, argv[0], argv, 509 self.environ, 510 self.workdir, 511 usePTY=self.usePTY) 512 # connectionMade might have been called during spawnProcess 513 if not self.process: 514 self.process = p 515 516 # connectionMade also closes stdin as long as we're not using a PTY. 517 # This is intended to kill off inappropriately interactive commands 518 # better than the (long) hung-command timeout. ProcessPTY should be 519 # enhanced to allow the same childFDs argument that Process takes, 520 # which would let us connect stdin to /dev/null . 521 522 if self.timeout: 523 self.timer = reactor.callLater(self.timeout, self.doTimeout) 524 525 if self.maxTime: 526 self.maxTimer = reactor.callLater(self.maxTime, self.doMaxTimeout) 527 528 for w in self.logFileWatchers: 529 w.start()
530 531
532 - def _chunkForSend(self, data):
533 # limit the chunks that we send over PB to 128k, since it has a 534 # hardwired string-size limit of 640k. 535 LIMIT = self.CHUNK_LIMIT 536 for i in range(0, len(data), LIMIT): 537 yield data[i:i+LIMIT]
538
539 - def addStdout(self, data):
540 if self.sendStdout: 541 for chunk in self._chunkForSend(data): 542 self.sendStatus({'stdout': chunk}) 543 if self.keepStdout: 544 self.stdout += data 545 if self.timer: 546 self.timer.reset(self.timeout)
547
548 - def addStderr(self, data):
549 if self.sendStderr: 550 for chunk in self._chunkForSend(data): 551 self.sendStatus({'stderr': chunk}) 552 if self.keepStderr: 553 self.stderr += data 554 if self.timer: 555 self.timer.reset(self.timeout)
556
557 - def addLogfile(self, name, data):
558 for chunk in self._chunkForSend(data): 559 self.sendStatus({'log': (name, chunk)}) 560 if self.timer: 561 self.timer.reset(self.timeout)
562
563 - def finished(self, sig, rc):
564 self.elapsedTime = time.time() - self.startTime 565 log.msg("command finished with signal %s, exit code %s, elapsedTime: %0.6f" % (sig,rc,self.elapsedTime)) 566 for w in self.logFileWatchers: 567 # this will send the final updates 568 w.stop() 569 if sig is not None: 570 rc = -1 571 if self.sendRC: 572 if sig is not None: 573 self.sendStatus( 574 {'header': "process killed by signal %d\n" % sig}) 575 self.sendStatus({'rc': rc}) 576 self.sendStatus({'header': "elapsedTime=%0.6f\n" % self.elapsedTime}) 577 if self.timer: 578 self.timer.cancel() 579 self.timer = None 580 if self.maxTimer: 581 self.maxTimer.cancel() 582 self.maxTimer = None 583 d = self.deferred 584 self.deferred = None 585 if d: 586 d.callback(rc) 587 else: 588 log.msg("Hey, command %s finished twice" % self)
589
590 - def failed(self, why):
591 log.msg("ShellCommand.failed: command failed: %s" % (why,)) 592 if self.timer: 593 self.timer.cancel() 594 self.timer = None 595 if self.maxTimer: 596 self.maxTimer.cancel() 597 self.maxTimer = None 598 d = self.deferred 599 self.deferred = None 600 if d: 601 d.errback(why) 602 else: 603 log.msg("Hey, command %s finished twice" % self)
604
605 - def doTimeout(self):
606 self.timer = None 607 msg = "command timed out: %d seconds without output" % self.timeout 608 self.kill(msg)
609
610 - def doMaxTimeout(self):
611 self.maxTimer = None 612 msg = "command timed out: %d seconds elapsed" % self.maxTime 613 self.kill(msg)
614
615 - def kill(self, msg):
616 # This may be called by the timeout, or when the user has decided to 617 # abort this build. 618 if self.timer: 619 self.timer.cancel() 620 self.timer = None 621 if self.maxTimer: 622 self.maxTimer.cancel() 623 self.maxTimer = None 624 if hasattr(self.process, "pid") and self.process.pid is not None: 625 msg += ", killing pid %s" % self.process.pid 626 log.msg(msg) 627 self.sendStatus({'header': "\n" + msg + "\n"}) 628 629 hit = 0 630 if runtime.platformType == "posix": 631 try: 632 # really want to kill off all child processes too. Process 633 # Groups are ideal for this, but that requires 634 # spawnProcess(usePTY=1). Try both ways in case process was 635 # not started that way. 636 637 # the test suite sets self.KILL=None to tell us we should 638 # only pretend to kill the child. This lets us test the 639 # backup timer. 640 641 sig = None 642 if self.KILL is not None: 643 sig = getattr(signal, "SIG"+ self.KILL, None) 644 645 if self.KILL == None: 646 log.msg("self.KILL==None, only pretending to kill child") 647 elif sig is None: 648 log.msg("signal module is missing SIG%s" % self.KILL) 649 elif not hasattr(os, "kill"): 650 log.msg("os module is missing the 'kill' function") 651 elif not hasattr(self.process, "pid") or self.process.pid is None: 652 log.msg("self.process has no pid") 653 else: 654 log.msg("trying os.kill(-pid, %d)" % (sig,)) 655 # TODO: maybe use os.killpg instead of a negative pid? 656 os.kill(-self.process.pid, sig) 657 log.msg(" signal %s sent successfully" % sig) 658 hit = 1 659 except OSError: 660 # probably no-such-process, maybe because there is no process 661 # group 662 pass 663 if not hit: 664 try: 665 if self.KILL is None: 666 log.msg("self.KILL==None, only pretending to kill child") 667 else: 668 log.msg("trying process.signalProcess('KILL')") 669 self.process.signalProcess(self.KILL) 670 log.msg(" signal %s sent successfully" % (self.KILL,)) 671 hit = 1 672 except OSError: 673 # could be no-such-process, because they finished very recently 674 pass 675 if not hit: 676 log.msg("signalProcess/os.kill failed both times") 677 678 if runtime.platformType == "posix": 679 # we only do this under posix because the win32eventreactor 680 # blocks here until the process has terminated, while closing 681 # stderr. This is weird. 682 self.pp.transport.loseConnection() 683 684 # finished ought to be called momentarily. Just in case it doesn't, 685 # set a timer which will abandon the command. 686 self.timer = reactor.callLater(self.BACKUP_TIMEOUT, 687 self.doBackupTimeout)
688
689 - def doBackupTimeout(self):
690 log.msg("we tried to kill the process, and it wouldn't die.." 691 " finish anyway") 692 self.timer = None 693 self.sendStatus({'header': "SIGKILL failed to kill process\n"}) 694 if self.sendRC: 695 self.sendStatus({'header': "using fake rc=-1\n"}) 696 self.sendStatus({'rc': -1}) 697 self.failed(TimeoutError("SIGKILL failed to kill process"))
698 699
700 - def writeStdin(self, data):
701 self.pp.writeStdin(data)
702
703 - def closeStdin(self):
704 self.pp.closeStdin()
705 706
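# Illustrative sketch (not part of the original module): ShellCommand is
# normally driven by SlaveShellCommand (below), which hands it the real
# SlaveBuilder. For a standalone experiment, a builder-like object that
# implements the status-update side of SlaveBuilder is enough, since usePTY
# is given explicitly here. The command, paths and values are hypothetical.
def _example_run_shell_command(builder):
    cmd = ShellCommand(builder, ["make", "test"],
                       workdir="/tmp/example-build",
                       environ={'PYTHONPATH': ['src', 'lib']},  # list form
                       timeout=1200,        # kill after 20 min of silence
                       logfiles={'test.log': {'filename': 'test.log',
                                              'follow': True}},
                       usePTY=False)
    d = cmd.start()   # Deferred that fires with the exit code
    d.addCallback(lambda rc: log.msg("example command finished, rc=%s" % rc))
    return d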
707 -class Command:
 708      implements(ISlaveCommand)
 709  
 710      """This class defines one command that can be invoked by the build master.
 711      The command is executed on the slave side, and always sends back a
 712      completion message when it finishes. It may also send intermediate status
 713      as it runs (by calling builder.sendStatus). Some commands can be
 714      interrupted (either by the build master or a local timeout), in which
 715      case the step is expected to complete normally with a status message that
 716      indicates an error occurred.
 717  
 718      These commands are used by BuildSteps on the master side. Each kind of
 719      BuildStep uses a single Command. The slave must implement all the
 720      Commands required by the set of BuildSteps used for any given build:
 721      this is checked at startup time.
 722  
 723      All Commands are constructed with the same signature:
 724       c = CommandClass(builder, args)
 725      where 'builder' is the parent SlaveBuilder object, and 'args' is a
 726      dict that is interpreted per-command.
 727  
 728      The setup(args) method is available for setup, and is run from __init__.
 729  
 730      The Command is started with start(). This method must be implemented in a
 731      subclass, and it should return a Deferred. When your step is done, you
 732      should fire the Deferred (the results are not used). If the command is
 733      interrupted, it should fire the Deferred anyway.
 734  
 735      While the command runs, it may send status messages back to the
 736      buildmaster by calling self.sendStatus(statusdict). The statusdict is
 737      interpreted by the master-side BuildStep however it likes.
 738  
 739      A separate completion message is sent when the deferred fires, which
 740      indicates that the Command has finished, but does not carry any status
 741      data. If the Command needs to return an exit code of some sort, that
 742      should be sent as a regular status message before the deferred is fired.
 743      Once builder.commandComplete has been run, no more status messages may be
 744      sent.
 745  
 746      If interrupt() is called, the Command should attempt to shut down as
 747      quickly as possible. Child processes should be killed, new ones should
 748      not be started. The Command should send some kind of error status update,
 749      then complete as usual by firing the Deferred.
 750  
 751      .interrupted should be set by interrupt(), and can be tested to avoid
 752      sending multiple error status messages.
 753  
 754      If .running is False, the bot is shutting down (or has otherwise lost the
 755      connection to the master), and should not send any status messages. This
 756      is checked in Command.sendStatus.
 757  
 758      """
 759  
 760      # builder methods:
 761      #  sendStatus(dict) (zero or more)
 762      #  commandComplete() or commandInterrupted() (one, at end)
 763  
 764      debug = False
 765      interrupted = False
 766      running = False # set by Builder, cleared on shutdown or when the
 767                      # Deferred fires
 768  
769 - def __init__(self, builder, stepId, args):
770 self.builder = builder 771 self.stepId = stepId # just for logging 772 self.args = args 773 self.setup(args)
774
775 - def setup(self, args):
776 """Override this in a subclass to extract items from the args dict.""" 777 pass
778
779 - def doStart(self):
780 self.running = True 781 d = defer.maybeDeferred(self.start) 782 d.addBoth(self.commandComplete) 783 return d
784
785 - def start(self):
786 """Start the command. This method should return a Deferred that will 787 fire when the command has completed. The Deferred's argument will be 788 ignored. 789 790 This method should be overridden by subclasses.""" 791 raise NotImplementedError, "You must implement this in a subclass"
792
793 - def sendStatus(self, status):
794 """Send a status update to the master.""" 795 if self.debug: 796 log.msg("sendStatus", status) 797 if not self.running: 798 log.msg("would sendStatus but not .running") 799 return 800 self.builder.sendUpdate(status)
801
802 - def doInterrupt(self):
803 self.running = False 804 self.interrupt()
805
806 - def interrupt(self):
807 """Override this in a subclass to allow commands to be interrupted. 808 May be called multiple times, test and set self.interrupted=True if 809 this matters.""" 810 pass
811
812 - def commandComplete(self, res):
813 self.running = False 814 return res
815 816 # utility methods, mostly used by SlaveShellCommand and the like 817
818 - def _abandonOnFailure(self, rc):
819 if type(rc) is not int: 820 log.msg("weird, _abandonOnFailure was given rc=%s (%s)" % \ 821 (rc, type(rc))) 822 assert isinstance(rc, int) 823 if rc != 0: 824 raise AbandonChain(rc) 825 return rc
826
827 - def _sendRC(self, res):
828 self.sendStatus({'rc': 0})
829
830 - def _checkAbandoned(self, why):
831 log.msg("_checkAbandoned", why) 832 why.trap(AbandonChain) 833 log.msg(" abandoning chain", why.value) 834 self.sendStatus({'rc': why.value.args[0]}) 835 return None
836 837 838
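# Illustrative sketch (not part of the original module): the smallest useful
# Command subclass, following the protocol described in the Command docstring
# above. The command name "example.sleep" and its args are hypothetical; a
# real slave only needs the commands its BuildSteps actually use.
class ExampleSleepCommand(Command):
    def setup(self, args):
        self.seconds = args.get('seconds', 1)
        self.timer = None

    def start(self):
        self.d = defer.Deferred()
        self.sendStatus({'header': "sleeping %s seconds\n" % self.seconds})
        self.timer = reactor.callLater(self.seconds, self._done, 0)
        return self.d

    def interrupt(self):
        if self.interrupted or self.timer is None:
            return
        self.interrupted = True
        self.timer.cancel()
        self._done(1)   # fire the Deferred anyway, as interrupt() requires

    def _done(self, rc):
        self.timer = None
        self.sendStatus({'rc': rc})  # dropped by sendStatus once .running is False
        self.d.callback(None)

# registerSlaveCommand("example.sleep", ExampleSleepCommand, command_version)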
839 -class SlaveFileUploadCommand(Command):
840 """ 841 Upload a file from slave to build master 842 Arguments: 843 844 - ['workdir']: base directory to use 845 - ['slavesrc']: name of the slave-side file to read from 846 - ['writer']: RemoteReference to a transfer._FileWriter object 847 - ['maxsize']: max size (in bytes) of file to write 848 - ['blocksize']: max size for each data block 849 """ 850 debug = False 851
852 - def setup(self, args):
853 self.workdir = args['workdir'] 854 self.filename = args['slavesrc'] 855 self.writer = args['writer'] 856 self.remaining = args['maxsize'] 857 self.blocksize = args['blocksize'] 858 self.stderr = None 859 self.rc = 0
860
861 - def start(self):
862 if self.debug: 863 log.msg('SlaveFileUploadCommand started') 864 865 # Open file 866 self.path = os.path.join(self.builder.basedir, 867 self.workdir, 868 os.path.expanduser(self.filename)) 869 try: 870 self.fp = open(self.path, 'rb') 871 if self.debug: 872 log.msg('Opened %r for upload' % self.path) 873 except: 874 # TODO: this needs cleanup 875 self.fp = None 876 self.stderr = 'Cannot open file %r for upload' % self.path 877 self.rc = 1 878 if self.debug: 879 log.msg('Cannot open file %r for upload' % self.path) 880 881 self.sendStatus({'header': "sending %s" % self.path}) 882 883 d = defer.Deferred() 884 reactor.callLater(0, self._loop, d) 885 def _close(res): 886 # close the file, but pass through any errors from _loop 887 d1 = self.writer.callRemote("close") 888 d1.addErrback(log.err) 889 d1.addCallback(lambda ignored: res) 890 return d1
891 d.addBoth(_close) 892 d.addBoth(self.finished) 893 return d
894
895 - def _loop(self, fire_when_done):
896 d = defer.maybeDeferred(self._writeBlock) 897 def _done(finished): 898 if finished: 899 fire_when_done.callback(None) 900 else: 901 self._loop(fire_when_done)
902 def _err(why): 903 fire_when_done.errback(why) 904 d.addCallbacks(_done, _err) 905 return None 906
907 - def _writeBlock(self):
908 """Write a block of data to the remote writer""" 909 910 if self.interrupted or self.fp is None: 911 if self.debug: 912 log.msg('SlaveFileUploadCommand._writeBlock(): end') 913 return True 914 915 length = self.blocksize 916 if self.remaining is not None and length > self.remaining: 917 length = self.remaining 918 919 if length <= 0: 920 if self.stderr is None: 921 self.stderr = 'Maximum filesize reached, truncating file %r' \ 922 % self.path 923 self.rc = 1 924 data = '' 925 else: 926 data = self.fp.read(length) 927 928 if self.debug: 929 log.msg('SlaveFileUploadCommand._writeBlock(): '+ 930 'allowed=%d readlen=%d' % (length, len(data))) 931 if len(data) == 0: 932 log.msg("EOF: callRemote(close)") 933 return True 934 935 if self.remaining is not None: 936 self.remaining = self.remaining - len(data) 937 assert self.remaining >= 0 938 d = self.writer.callRemote('write', data) 939 d.addCallback(lambda res: False) 940 return d
941
942 - def interrupt(self):
943 if self.debug: 944 log.msg('interrupted') 945 if self.interrupted: 946 return 947 if self.stderr is None: 948 self.stderr = 'Upload of %r interrupted' % self.path 949 self.rc = 1 950 self.interrupted = True
951 # the next _writeBlock call will notice the .interrupted flag 952
953 - def finished(self, res):
954 if self.debug: 955 log.msg('finished: stderr=%r, rc=%r' % (self.stderr, self.rc)) 956 if self.stderr is None: 957 self.sendStatus({'rc': self.rc}) 958 else: 959 self.sendStatus({'stderr': self.stderr, 'rc': self.rc}) 960 return res
961 962 registerSlaveCommand("uploadFile", SlaveFileUploadCommand, command_version) 963 964
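# Illustrative sketch (not part of the original module): the shape of the
# args dict the master-side upload step sends for 'uploadFile' (it passes a
# transfer._FileWriter reference, per the docstring above). All values here
# are hypothetical; 'writer' is a live RemoteReference in practice and cannot
# be constructed locally.
_example_uploadFile_args = {
    'workdir': "build",
    'slavesrc': "_trial_temp/test.log",  # slave-side file, relative to workdir
    'writer': None,                      # RemoteReference to a _FileWriter
    'maxsize': 10*1024*1024,             # refuse to send more than 10MB
    'blocksize': 16*1024,                # 16kB chunks over PB
}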
965 -class SlaveDirectoryUploadCommand(SlaveFileUploadCommand):
966 """ 967 Upload a directory from slave to build master 968 Arguments: 969 970 - ['workdir']: base directory to use 971 - ['slavesrc']: name of the slave-side directory to read from 972 - ['writer']: RemoteReference to a transfer._DirectoryWriter object 973 - ['maxsize']: max size (in bytes) of file to write 974 - ['blocksize']: max size for each data block 975 - ['compress']: one of [None, 'bz2', 'gz'] 976 """ 977 debug = True 978
979 - def setup(self, args):
980 self.workdir = args['workdir'] 981 self.dirname = args['slavesrc'] 982 self.writer = args['writer'] 983 self.remaining = args['maxsize'] 984 self.blocksize = args['blocksize'] 985 self.compress = args['compress'] 986 self.stderr = None 987 self.rc = 0
988
989 - def start(self):
990 if self.debug: 991 log.msg('SlaveDirectoryUploadCommand started') 992 993 self.path = os.path.join(self.builder.basedir, 994 self.workdir, 995 os.path.expanduser(self.dirname)) 996 if self.debug: 997 log.msg("path: %r" % self.path) 998 999 # Create temporary archive 1000 fd, self.tarname = tempfile.mkstemp() 1001 fileobj = os.fdopen(fd, 'w') 1002 if self.compress == 'bz2': 1003 mode='w|bz2' 1004 elif self.compress == 'gz': 1005 mode='w|gz' 1006 else: 1007 mode = 'w' 1008 archive = tarfile.open(name=self.tarname, mode=mode, fileobj=fileobj) 1009 archive.add(self.path, '') 1010 archive.close() 1011 fileobj.close() 1012 1013 # Transfer it 1014 self.fp = open(self.tarname, 'rb') 1015 1016 self.sendStatus({'header': "sending %s" % self.path}) 1017 1018 d = defer.Deferred() 1019 reactor.callLater(0, self._loop, d) 1020 def unpack(res): 1021 # unpack the archive, but pass through any errors from _loop 1022 d1 = self.writer.callRemote("unpack") 1023 d1.addErrback(log.err) 1024 d1.addCallback(lambda ignored: res) 1025 return d1
1026 d.addCallback(unpack) 1027 d.addBoth(self.finished) 1028 return d
1029
1030 - def finished(self, res):
1031 self.fp.close() 1032 os.remove(self.tarname) 1033 if self.debug: 1034 log.msg('finished: stderr=%r, rc=%r' % (self.stderr, self.rc)) 1035 if self.stderr is None: 1036 self.sendStatus({'rc': self.rc}) 1037 else: 1038 self.sendStatus({'stderr': self.stderr, 'rc': self.rc}) 1039 return res
1040 1041 registerSlaveCommand("uploadDirectory", SlaveDirectoryUploadCommand, command_version) 1042 1043
1044 -class SlaveFileDownloadCommand(Command):
1045 """ 1046 Download a file from master to slave 1047 Arguments: 1048 1049 - ['workdir']: base directory to use 1050 - ['slavedest']: name of the slave-side file to be created 1051 - ['reader']: RemoteReference to a transfer._FileReader object 1052 - ['maxsize']: max size (in bytes) of file to write 1053 - ['blocksize']: max size for each data block 1054 - ['mode']: access mode for the new file 1055 """ 1056 debug = False 1057
1058 - def setup(self, args):
1059 self.workdir = args['workdir'] 1060 self.filename = args['slavedest'] 1061 self.reader = args['reader'] 1062 self.bytes_remaining = args['maxsize'] 1063 self.blocksize = args['blocksize'] 1064 self.mode = args['mode'] 1065 self.stderr = None 1066 self.rc = 0
1067
1068 - def start(self):
1069 if self.debug: 1070 log.msg('SlaveFileDownloadCommand starting') 1071 1072 # Open file 1073 self.path = os.path.join(self.builder.basedir, 1074 self.workdir, 1075 os.path.expanduser(self.filename)) 1076 1077 dirname = os.path.dirname(self.path) 1078 if not os.path.exists(dirname): 1079 os.makedirs(dirname) 1080 1081 try: 1082 self.fp = open(self.path, 'wb') 1083 if self.debug: 1084 log.msg('Opened %r for download' % self.path) 1085 if self.mode is not None: 1086 # note: there is a brief window during which the new file 1087 # will have the buildslave's default (umask) mode before we 1088 # set the new one. Don't use this mode= feature to keep files 1089 # private: use the buildslave's umask for that instead. (it 1090 # is possible to call os.umask() before and after the open() 1091 # call, but cleaning up from exceptions properly is more of a 1092 # nuisance that way). 1093 os.chmod(self.path, self.mode) 1094 except IOError: 1095 # TODO: this still needs cleanup 1096 self.fp = None 1097 self.stderr = 'Cannot open file %r for download' % self.path 1098 self.rc = 1 1099 if self.debug: 1100 log.msg('Cannot open file %r for download' % self.path) 1101 1102 d = defer.Deferred() 1103 reactor.callLater(0, self._loop, d) 1104 def _close(res): 1105 # close the file, but pass through any errors from _loop 1106 d1 = self.reader.callRemote('close') 1107 d1.addErrback(log.err) 1108 d1.addCallback(lambda ignored: res) 1109 return d1
1110 d.addBoth(_close) 1111 d.addBoth(self.finished) 1112 return d
1113
1114 - def _loop(self, fire_when_done):
1115 d = defer.maybeDeferred(self._readBlock) 1116 def _done(finished): 1117 if finished: 1118 fire_when_done.callback(None) 1119 else: 1120 self._loop(fire_when_done)
1121 def _err(why): 1122 fire_when_done.errback(why) 1123 d.addCallbacks(_done, _err) 1124 return None 1125
1126 - def _readBlock(self):
1127 """Read a block of data from the remote reader.""" 1128 1129 if self.interrupted or self.fp is None: 1130 if self.debug: 1131 log.msg('SlaveFileDownloadCommand._readBlock(): end') 1132 return True 1133 1134 length = self.blocksize 1135 if self.bytes_remaining is not None and length > self.bytes_remaining: 1136 length = self.bytes_remaining 1137 1138 if length <= 0: 1139 if self.stderr is None: 1140 self.stderr = 'Maximum filesize reached, truncating file %r' \ 1141 % self.path 1142 self.rc = 1 1143 return True 1144 else: 1145 d = self.reader.callRemote('read', length) 1146 d.addCallback(self._writeData) 1147 return d
1148
1149 - def _writeData(self, data):
1150 if self.debug: 1151 log.msg('SlaveFileDownloadCommand._readBlock(): readlen=%d' % 1152 len(data)) 1153 if len(data) == 0: 1154 return True 1155 1156 if self.bytes_remaining is not None: 1157 self.bytes_remaining = self.bytes_remaining - len(data) 1158 assert self.bytes_remaining >= 0 1159 self.fp.write(data) 1160 return False
1161
1162 - def interrupt(self):
1163 if self.debug: 1164 log.msg('interrupted') 1165 if self.interrupted: 1166 return 1167 if self.stderr is None: 1168 self.stderr = 'Download of %r interrupted' % self.path 1169 self.rc = 1 1170 self.interrupted = True
1171 # now we wait for the next read request to return. _readBlock will 1172 # abandon the file when it sees self.interrupted set. 1173
1174 - def finished(self, res):
1175 if self.fp is not None: 1176 self.fp.close() 1177 1178 if self.debug: 1179 log.msg('finished: stderr=%r, rc=%r' % (self.stderr, self.rc)) 1180 if self.stderr is None: 1181 self.sendStatus({'rc': self.rc}) 1182 else: 1183 self.sendStatus({'stderr': self.stderr, 'rc': self.rc}) 1184 return res
1185 1186 registerSlaveCommand("downloadFile", SlaveFileDownloadCommand, command_version) 1187 1188 1189
1190 -class SlaveShellCommand(Command):
1191 """This is a Command which runs a shell command. The args dict contains 1192 the following keys: 1193 1194 - ['command'] (required): a shell command to run. If this is a string, 1195 it will be run with /bin/sh (['/bin/sh', 1196 '-c', command]). If it is a list 1197 (preferred), it will be used directly. 1198 - ['workdir'] (required): subdirectory in which the command will be 1199 run, relative to the builder dir 1200 - ['env']: a dict of environment variables to augment/replace 1201 os.environ . PYTHONPATH is treated specially, and 1202 should be a list of path components to be prepended to 1203 any existing PYTHONPATH environment variable. 1204 - ['initial_stdin']: a string which will be written to the command's 1205 stdin as soon as it starts 1206 - ['keep_stdin_open']: unless True, the command's stdin will be 1207 closed as soon as initial_stdin has been 1208 written. Set this to True if you plan to write 1209 to stdin after the command has been started. 1210 - ['want_stdout']: 0 if stdout should be thrown away 1211 - ['want_stderr']: 0 if stderr should be thrown away 1212 - ['usePTY']: True or False if the command should use a PTY (defaults to 1213 configuration of the slave) 1214 - ['not_really']: 1 to skip execution and return rc=0 1215 - ['timeout']: seconds of silence to tolerate before killing command 1216 - ['maxTime']: seconds before killing command 1217 - ['logfiles']: dict mapping LogFile name to the workdir-relative 1218 filename of a local log file. This local file will be 1219 watched just like 'tail -f', and all changes will be 1220 written to 'log' status updates. 1221 - ['logEnviron']: False to not log the environment variables on the slave 1222 1223 ShellCommand creates the following status messages: 1224 - {'stdout': data} : when stdout data is available 1225 - {'stderr': data} : when stderr data is available 1226 - {'header': data} : when headers (command start/stop) are available 1227 - {'log': (logfile_name, data)} : when log files have new contents 1228 - {'rc': rc} : when the process has terminated 1229 """ 1230
1231 - def start(self):
1232 args = self.args 1233 # args['workdir'] is relative to Builder directory, and is required. 1234 assert args['workdir'] is not None 1235 workdir = os.path.join(self.builder.basedir, args['workdir']) 1236 1237 c = ShellCommand(self.builder, args['command'], 1238 workdir, environ=args.get('env'), 1239 timeout=args.get('timeout', None), 1240 maxTime=args.get('maxTime', None), 1241 sendStdout=args.get('want_stdout', True), 1242 sendStderr=args.get('want_stderr', True), 1243 sendRC=True, 1244 initialStdin=args.get('initial_stdin'), 1245 keepStdinOpen=args.get('keep_stdin_open'), 1246 logfiles=args.get('logfiles', {}), 1247 usePTY=args.get('usePTY', "slave-config"), 1248 logEnviron=args.get('logEnviron', True), 1249 ) 1250 self.command = c 1251 d = self.command.start() 1252 return d
1253
1254 - def interrupt(self):
1255 self.interrupted = True 1256 self.command.kill("command interrupted")
1257
1258 - def writeStdin(self, data):
1259 self.command.writeStdin(data)
1260
1261 - def closeStdin(self):
1262 self.command.closeStdin()
1263 1264 registerSlaveCommand("shell", SlaveShellCommand, command_version) 1265 1266
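# Illustrative sketch (not part of the original module): a typical args dict
# for the 'shell' command as documented in SlaveShellCommand above. All
# values are hypothetical sample data.
_example_shell_args = {
    'command': ["make", "BUILDTYPE=release", "test"],  # list form preferred
    'workdir': "build",                      # relative to the builder directory
    'env': {'PYTHONPATH': ["src", "lib"]},   # list is joined with os.pathsep
    'want_stdout': 1,
    'want_stderr': 1,
    'logfiles': {'testlog': "_trial_temp/test.log"},
    'timeout': 1200,
    'usePTY': "slave-config",
}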
1267 -class DummyCommand(Command):
1268 """ 1269 I am a dummy no-op command that by default takes 5 seconds to complete. 1270 See L{buildbot.steps.dummy.RemoteDummy} 1271 """ 1272
1273 - def start(self):
1274 self.d = defer.Deferred() 1275 log.msg(" starting dummy command [%s]" % self.stepId) 1276 self.timer = reactor.callLater(1, self.doStatus) 1277 return self.d
1278
1279 - def interrupt(self):
1280 if self.interrupted: 1281 return 1282 self.timer.cancel() 1283 self.timer = None 1284 self.interrupted = True 1285 self.finished()
1286
1287 - def doStatus(self):
1288 log.msg(" sending intermediate status") 1289 self.sendStatus({'stdout': 'data'}) 1290 timeout = self.args.get('timeout', 5) + 1 1291 self.timer = reactor.callLater(timeout - 1, self.finished)
1292
1293 - def finished(self):
1294 log.msg(" dummy command finished [%s]" % self.stepId) 1295 if self.interrupted: 1296 self.sendStatus({'rc': 1}) 1297 else: 1298 self.sendStatus({'rc': 0}) 1299 self.d.callback(0)
1300  
1301  registerSlaveCommand("dummy", DummyCommand, command_version)
1302  
1303  
1304  # this maps handle names to a callable. When the WaitCommand starts, this
1305  # callable is invoked with no arguments. It should return a Deferred. When
1306  # that Deferred fires, our WaitCommand will finish.
1307  waitCommandRegistry = {}
1308  
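# Illustrative sketch (not part of the original module): how the unit tests
# are expected to use the registry above. The handle name is arbitrary; the
# callable must return a Deferred, and the matching 'dummy.wait' command
# finishes when that Deferred fires.
def _example_register_wait_handle():
    finish_d = defer.Deferred()
    waitCommandRegistry["example-handle"] = lambda: finish_d
    # the master would then run a dummy.wait command with
    #   args = {'handle': "example-handle"}
    # and the test later calls finish_d.callback(None) to let it complete.
    return finish_d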
1309 -class WaitCommand(Command):
1310 """ 1311 I am a dummy command used by the buildbot unit test suite. I want for the 1312 unit test to tell us to finish. See L{buildbot.steps.dummy.Wait} 1313 """ 1314
1315 - def start(self):
1316 self.d = defer.Deferred() 1317 log.msg(" starting wait command [%s]" % self.stepId) 1318 handle = self.args['handle'] 1319 cb = waitCommandRegistry[handle] 1320 del waitCommandRegistry[handle] 1321 def _called(): 1322 log.msg(" wait-%s starting" % (handle,)) 1323 d = cb() 1324 def _done(res): 1325 log.msg(" wait-%s finishing: %s" % (handle, res)) 1326 return res
1327 d.addBoth(_done) 1328 d.addCallbacks(self.finished, self.failed)
1329 reactor.callLater(0, _called) 1330 return self.d 1331
1332 - def interrupt(self):
1333 log.msg(" wait command interrupted") 1334 if self.interrupted: 1335 return 1336 self.interrupted = True 1337 self.finished("interrupted")
1338
1339 - def finished(self, res):
1340 log.msg(" wait command finished [%s]" % self.stepId) 1341 if self.interrupted: 1342 self.sendStatus({'rc': 2}) 1343 else: 1344 self.sendStatus({'rc': 0}) 1345 self.d.callback(0)
1346 - def failed(self, why):
1347 log.msg(" wait command failed [%s]" % self.stepId) 1348 self.sendStatus({'rc': 1}) 1349 self.d.callback(0)
1350 1351 registerSlaveCommand("dummy.wait", WaitCommand, command_version) 1352 1353
1354 -class SourceBase(Command):
1355 """Abstract base class for Version Control System operations (checkout 1356 and update). This class extracts the following arguments from the 1357 dictionary received from the master: 1358 1359 - ['workdir']: (required) the subdirectory where the buildable sources 1360 should be placed 1361 1362 - ['mode']: one of update/copy/clobber/export, defaults to 'update' 1363 1364 - ['revision']: If not None, this is an int or string which indicates 1365 which sources (along a time-like axis) should be used. 1366 It is the thing you provide as the CVS -r or -D 1367 argument. 1368 1369 - ['patch']: If not None, this is a tuple of (striplevel, patch) 1370 which contains a patch that should be applied after the 1371 checkout has occurred. Once applied, the tree is no 1372 longer eligible for use with mode='update', and it only 1373 makes sense to use this in conjunction with a 1374 ['revision'] argument. striplevel is an int, and patch 1375 is a string in standard unified diff format. The patch 1376 will be applied with 'patch -p%d <PATCH', with 1377 STRIPLEVEL substituted as %d. The command will fail if 1378 the patch process fails (rejected hunks). 1379 1380 - ['timeout']: seconds of silence tolerated before we kill off the 1381 command 1382 1383 - ['maxTime']: seconds before we kill off the command 1384 1385 - ['retry']: If not None, this is a tuple of (delay, repeats) 1386 which means that any failed VC updates should be 1387 reattempted, up to REPEATS times, after a delay of 1388 DELAY seconds. This is intended to deal with slaves 1389 that experience transient network failures. 1390 """ 1391 1392 sourcedata = "" 1393
1394 - def setup(self, args):
1395 # if we need to parse the output, use this environment. Otherwise 1396 # command output will be in whatever the buildslave's native language 1397 # has been set to. 1398 self.env = os.environ.copy() 1399 self.env['LC_MESSAGES'] = "C" 1400 1401 self.workdir = args['workdir'] 1402 self.mode = args.get('mode', "update") 1403 self.revision = args.get('revision') 1404 self.patch = args.get('patch') 1405 self.timeout = args.get('timeout', 120) 1406 self.maxTime = args.get('maxTime', None) 1407 self.retry = args.get('retry')
1408 # VC-specific subclasses should override this to extract more args. 1409 # Make sure to upcall! 1410
1411 - def start(self):
1412 self.sendStatus({'header': "starting " + self.header + "\n"}) 1413 self.command = None 1414 1415 # self.srcdir is where the VC system should put the sources 1416 if self.mode == "copy": 1417 self.srcdir = "source" # hardwired directory name, sorry 1418 else: 1419 self.srcdir = self.workdir 1420 self.sourcedatafile = os.path.join(self.builder.basedir, 1421 self.srcdir, 1422 ".buildbot-sourcedata") 1423 1424 d = defer.succeed(None) 1425 self.maybeClobber(d) 1426 if not (self.sourcedirIsUpdateable() and self.sourcedataMatches()): 1427 # the directory cannot be updated, so we have to clobber it. 1428 # Perhaps the master just changed modes from 'export' to 1429 # 'update'. 1430 d.addCallback(self.doClobber, self.srcdir) 1431 1432 d.addCallback(self.doVC) 1433 1434 if self.mode == "copy": 1435 d.addCallback(self.doCopy) 1436 if self.patch: 1437 d.addCallback(self.doPatch) 1438 d.addCallbacks(self._sendRC, self._checkAbandoned) 1439 return d
1440
1441 - def maybeClobber(self, d):
1442 # do we need to clobber anything? 1443 if self.mode in ("copy", "clobber", "export"): 1444 d.addCallback(self.doClobber, self.workdir)
1445
1446 - def interrupt(self):
1447 self.interrupted = True 1448 if self.command: 1449 self.command.kill("command interrupted")
1450
1451 - def doVC(self, res):
1452 if self.interrupted: 1453 raise AbandonChain(1) 1454 if self.sourcedirIsUpdateable() and self.sourcedataMatches(): 1455 d = self.doVCUpdate() 1456 d.addCallback(self.maybeDoVCFallback) 1457 else: 1458 d = self.doVCFull() 1459 d.addBoth(self.maybeDoVCRetry) 1460 d.addCallback(self._abandonOnFailure) 1461 d.addCallback(self._handleGotRevision) 1462 d.addCallback(self.writeSourcedata) 1463 return d
1464
1465 - def sourcedataMatches(self):
1466 try: 1467 olddata = self.readSourcedata() 1468 if olddata != self.sourcedata: 1469 return False 1470 except IOError: 1471 return False 1472 return True
1473
1474 - def sourcedirIsPatched(self):
1475 return os.path.exists(os.path.join(self.builder.basedir, 1476 self.workdir, 1477 ".buildbot-patched"))
1478
1479 - def _handleGotRevision(self, res):
1480 d = defer.maybeDeferred(self.parseGotRevision) 1481 d.addCallback(lambda got_revision: 1482 self.sendStatus({'got_revision': got_revision})) 1483 return d
1484
1485 - def parseGotRevision(self):
1486 """Override this in a subclass. It should return a string that 1487 represents which revision was actually checked out, or a Deferred 1488 that will fire with such a string. If, in a future build, you were to 1489 pass this 'got_revision' string in as the 'revision' component of a 1490 SourceStamp, you should wind up with the same source code as this 1491 checkout just obtained. 1492 1493 It is probably most useful to scan self.command.stdout for a string 1494 of some sort. Be sure to set keepStdout=True on the VC command that 1495 you run, so that you'll have something available to look at. 1496 1497 If this information is unavailable, just return None.""" 1498 1499 return None
1500
1501 - def readSourcedata(self):
1502 return open(self.sourcedatafile, "r").read()
1503
1504 - def writeSourcedata(self, res):
1505 open(self.sourcedatafile, "w").write(self.sourcedata) 1506 return res
1507
1508 - def sourcedirIsUpdateable(self):
1509 """Returns True if the tree can be updated.""" 1510 raise NotImplementedError("this must be implemented in a subclass")
1511
1512 - def doVCUpdate(self):
1513 """Returns a deferred with the steps to update a checkout.""" 1514 raise NotImplementedError("this must be implemented in a subclass")
1515
1516 - def doVCFull(self):
1517 """Returns a deferred with the steps to do a fresh checkout.""" 1518 raise NotImplementedError("this must be implemented in a subclass")
1519
1520 - def maybeDoVCFallback(self, rc):
1521 if type(rc) is int and rc == 0: 1522 return rc 1523 if self.interrupted: 1524 raise AbandonChain(1) 1525 msg = "update failed, clobbering and trying again" 1526 self.sendStatus({'header': msg + "\n"}) 1527 log.msg(msg) 1528 d = self.doClobber(None, self.srcdir) 1529 d.addCallback(self.doVCFallback2) 1530 return d
1531
1532 - def doVCFallback2(self, res):
1533 msg = "now retrying VC operation" 1534 self.sendStatus({'header': msg + "\n"}) 1535 log.msg(msg) 1536 d = self.doVCFull() 1537 d.addBoth(self.maybeDoVCRetry) 1538 d.addCallback(self._abandonOnFailure) 1539 return d
1540
1541 - def maybeDoVCRetry(self, res):
1542 """We get here somewhere after a VC chain has finished. res could 1543 be:: 1544 1545 - 0: the operation was successful 1546 - nonzero: the operation failed. retry if possible 1547 - AbandonChain: the operation failed, someone else noticed. retry. 1548 - Failure: some other exception, re-raise 1549 """ 1550 1551 if isinstance(res, failure.Failure): 1552 if self.interrupted: 1553 return res # don't re-try interrupted builds 1554 res.trap(AbandonChain) 1555 else: 1556 if type(res) is int and res == 0: 1557 return res 1558 if self.interrupted: 1559 raise AbandonChain(1) 1560 # if we get here, we should retry, if possible 1561 if self.retry: 1562 delay, repeats = self.retry 1563 if repeats >= 0: 1564 self.retry = (delay, repeats-1) 1565 msg = ("update failed, trying %d more times after %d seconds" 1566 % (repeats, delay)) 1567 self.sendStatus({'header': msg + "\n"}) 1568 log.msg(msg) 1569 d = defer.Deferred() 1570 self.maybeClobber(d) 1571 d.addCallback(lambda res: self.doVCFull()) 1572 d.addBoth(self.maybeDoVCRetry) 1573 reactor.callLater(delay, d.callback, None) 1574 return d 1575 return res
1576
1577 - def doClobber(self, dummy, dirname, chmodDone=False):
1578 # TODO: remove the old tree in the background 1579 ## workdir = os.path.join(self.builder.basedir, self.workdir) 1580 ## deaddir = self.workdir + ".deleting" 1581 ## if os.path.isdir(workdir): 1582 ## try: 1583 ## os.rename(workdir, deaddir) 1584 ## # might fail if deaddir already exists: previous deletion 1585 ## # hasn't finished yet 1586 ## # start the deletion in the background 1587 ## # TODO: there was a solaris/NetApp/NFS problem where a 1588 ## # process that was still running out of the directory we're 1589 ## # trying to delete could prevent the rm-rf from working. I 1590 ## # think it stalled the rm, but maybe it just died with 1591 ## # permission issues. Try to detect this. 1592 ## os.commands("rm -rf %s &" % deaddir) 1593 ## except: 1594 ## # fall back to sequential delete-then-checkout 1595 ## pass 1596 d = os.path.join(self.builder.basedir, dirname) 1597 if runtime.platformType != "posix": 1598 # if we're running on w32, use rmtree instead. It will block, 1599 # but hopefully it won't take too long. 1600 rmdirRecursive(d) 1601 return defer.succeed(0) 1602 command = ["rm", "-rf", d] 1603 c = ShellCommand(self.builder, command, self.builder.basedir, 1604 sendRC=0, timeout=self.timeout, maxTime=self.maxTime, 1605 usePTY=False) 1606 1607 self.command = c 1608 # sendRC=0 means the rm command will send stdout/stderr to the 1609 # master, but not the rc=0 when it finishes. That job is left to 1610 # _sendRC 1611 d = c.start() 1612 # The rm -rf may fail if there is a left-over subdir with chmod 000 1613 # permissions. So if we get a failure, we attempt to chmod suitable 1614 # permissions and re-try the rm -rf. 1615 if chmodDone: 1616 d.addCallback(self._abandonOnFailure) 1617 else: 1618 d.addCallback(lambda rc: self.doClobberTryChmodIfFail(rc, dirname)) 1619 return d
1620
1621 - def doClobberTryChmodIfFail(self, rc, dirname):
1622 assert isinstance(rc, int) 1623 if rc == 0: 1624 return defer.succeed(0) 1625 # Attempt a recursive chmod and re-try the rm -rf after. 1626 command = ["chmod", "-R", "u+rwx", os.path.join(self.builder.basedir, dirname)] 1627 c = ShellCommand(self.builder, command, self.builder.basedir, 1628 sendRC=0, timeout=self.timeout, maxTime=self.maxTime, 1629 usePTY=False) 1630 1631 self.command = c 1632 d = c.start() 1633 d.addCallback(self._abandonOnFailure) 1634 d.addCallback(lambda dummy: self.doClobber(dummy, dirname, True)) 1635 return d
1636
1637 - def doCopy(self, res):
1638 # now copy tree to workdir 1639 fromdir = os.path.join(self.builder.basedir, self.srcdir) 1640 todir = os.path.join(self.builder.basedir, self.workdir) 1641 if runtime.platformType != "posix": 1642 self.sendStatus({'header': "Since we're on a non-POSIX platform, " 1643 "we're not going to try to execute cp in a subprocess, but instead " 1644 "use shutil.copytree(), which will block until it is complete. " 1645 "fromdir: %s, todir: %s\n" % (fromdir, todir)}) 1646 shutil.copytree(fromdir, todir) 1647 return defer.succeed(0) 1648 1649 if not os.path.exists(os.path.dirname(todir)): 1650 os.makedirs(os.path.dirname(todir)) 1651 if os.path.exists(todir): 1652 # I don't think this happens, but just in case.. 1653 log.msg("cp target '%s' already exists -- cp will not do what you think!" % todir) 1654 1655 command = ['cp', '-R', '-P', '-p', fromdir, todir] 1656 c = ShellCommand(self.builder, command, self.builder.basedir, 1657 sendRC=False, timeout=self.timeout, maxTime=self.maxTime, 1658 usePTY=False) 1659 self.command = c 1660 d = c.start() 1661 d.addCallback(self._abandonOnFailure) 1662 return d
1663
1664 - def doPatch(self, res):
1665 patchlevel = self.patch[0] 1666 diff = self.patch[1] 1667 root = None 1668 if len(self.patch) >= 3: 1669 root = self.patch[2] 1670 command = [ 1671 getCommand("patch"), 1672 '-p%d' % patchlevel, 1673 '--remove-empty-files', 1674 '--force', 1675 '--forward', 1676 ] 1677 dir = os.path.join(self.builder.basedir, self.workdir) 1678 # Mark the directory so we don't try to update it later, or at least try 1679 # to revert first. 1680 marker = open(os.path.join(dir, ".buildbot-patched"), "w") 1681 marker.write("patched\n") 1682 marker.close() 1683 1684 # Update 'dir' with the 'root' option. Make sure it is a subdirectory 1685 # of dir. 1686 if (root and 1687 os.path.abspath(os.path.join(dir, root) 1688 ).startswith(os.path.abspath(dir))): 1689 dir = os.path.join(dir, root) 1690 1691 # now apply the patch 1692 c = ShellCommand(self.builder, command, dir, 1693 sendRC=False, timeout=self.timeout, 1694 maxTime=self.maxTime, initialStdin=diff, usePTY=False) 1695 self.command = c 1696 d = c.start() 1697 d.addCallback(self._abandonOnFailure) 1698 return d
1699 1700
1701 -class CVS(SourceBase):
1702 """CVS-specific VC operation. In addition to the arguments handled by 1703 SourceBase, this command reads the following keys: 1704 1705 ['cvsroot'] (required): the CVSROOT repository string 1706 ['cvsmodule'] (required): the module to be retrieved 1707 ['branch']: a '-r' tag or branch name to use for the checkout/update 1708 ['login']: a string for use as a password to 'cvs login' 1709 ['global_options']: a list of strings to use before the CVS verb 1710 ['checkout_options']: a list of strings to use after checkout, 1711 but before revision and branch specifiers 1712 """ 1713 1714 header = "cvs operation" 1715
1716 - def setup(self, args):
1717 SourceBase.setup(self, args) 1718 self.vcexe = getCommand("cvs") 1719 self.cvsroot = args['cvsroot'] 1720 self.cvsmodule = args['cvsmodule'] 1721 self.global_options = args.get('global_options', []) 1722 self.checkout_options = args.get('checkout_options', []) 1723 self.branch = args.get('branch') 1724 self.login = args.get('login') 1725 self.sourcedata = "%s\n%s\n%s\n" % (self.cvsroot, self.cvsmodule, 1726 self.branch)
1727
1728 - def sourcedirIsUpdateable(self):
1729 return (not self.sourcedirIsPatched() and 1730 os.path.isdir(os.path.join(self.builder.basedir, 1731 self.srcdir, "CVS")))
1732
1733 - def start(self):
1734 if self.login is not None: 1735 # need to do a 'cvs login' command first 1736 d = self.builder.basedir 1737 command = ([self.vcexe, '-d', self.cvsroot] + self.global_options 1738 + ['login']) 1739 c = ShellCommand(self.builder, command, d, 1740 sendRC=False, timeout=self.timeout, 1741 maxTime=self.maxTime, 1742 initialStdin=self.login+"\n", usePTY=False) 1743 self.command = c 1744 d = c.start() 1745 d.addCallback(self._abandonOnFailure) 1746 d.addCallback(self._didLogin) 1747 return d 1748 else: 1749 return self._didLogin(None)
1750
1751 - def _didLogin(self, res):
1752 # now we really start 1753 return SourceBase.start(self)
1754
1755 - def doVCUpdate(self):
1756 d = os.path.join(self.builder.basedir, self.srcdir) 1757 command = [self.vcexe, '-z3'] + self.global_options + ['update', '-dP'] 1758 if self.branch: 1759 command += ['-r', self.branch] 1760 if self.revision: 1761 command += ['-D', self.revision] 1762 c = ShellCommand(self.builder, command, d, 1763 sendRC=False, timeout=self.timeout, 1764 maxTime=self.maxTime, usePTY=False) 1765 self.command = c 1766 return c.start()
1767
1768 - def doVCFull(self):
1769 d = self.builder.basedir 1770 if self.mode == "export": 1771 verb = "export" 1772 else: 1773 verb = "checkout" 1774 command = ([self.vcexe, '-d', self.cvsroot, '-z3'] + 1775 self.global_options + 1776 [verb, '-d', self.srcdir]) 1777 1778 if verb == "checkout": 1779 command += self.checkout_options 1780 if self.branch: 1781 command += ['-r', self.branch] 1782 if self.revision: 1783 command += ['-D', self.revision] 1784 command += [self.cvsmodule] 1785 1786 c = ShellCommand(self.builder, command, d, 1787 sendRC=False, timeout=self.timeout, 1788 maxTime=self.maxTime, usePTY=False) 1789 self.command = c 1790 return c.start()
1791
1792 - def parseGotRevision(self):
1793 # CVS does not have any kind of revision stamp to speak of. We return 1794 # the current timestamp as a best-effort guess, but this depends upon 1795 # the local system having a clock that is 1796 # reasonably-well-synchronized with the repository. 1797 return time.strftime("%Y-%m-%d %H:%M:%S +0000", time.gmtime())
1798 1799 registerSlaveCommand("cvs", CVS, command_version) 1800
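# Illustrative example (not part of the original module): the buildmaster's
# CVS source step sends an args dictionary roughly like the following; all
# values shown here are hypothetical.
#
#   args = {'cvsroot': ":pserver:anonymous@cvs.example.org:/cvsroot/proj",
#           'cvsmodule': "proj",
#           'branch': "RELEASE_1_0",    # optional '-r' tag or branch
#           'global_options': ["-q"],   # placed before the CVS verb
#           'checkout_options': [],     # extra options for 'checkout'
#           'login': None}              # password for 'cvs login', if any
#
# plus the keys handled by SourceBase (workdir, mode, revision, timeout, ...).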
1801 -class SVN(SourceBase):
1802 """Subversion-specific VC operation. In addition to the arguments 1803 handled by SourceBase, this command reads the following keys: 1804 1805 ['svnurl'] (required): the SVN repository string 1806 ['username']: Username passed to the svn command 1807 ['password']: Password passed to the svn command 1808 ['keep_on_purge']: Files and directories to keep between updates 1809 ['ignore_ignores']: Ignore ignores when purging changes 1810 ['always_purge']: Always purge local changes after each build 1811 ['depth']: Pass depth argument to subversion 1.5+ 1812 """ 1813 1814 header = "svn operation" 1815
1816 - def setup(self, args):
1817 SourceBase.setup(self, args) 1818 self.vcexe = getCommand("svn") 1819 self.svnurl = args['svnurl'] 1820 self.sourcedata = "%s\n" % self.svnurl 1821 self.keep_on_purge = args.get('keep_on_purge', []) 1822 self.keep_on_purge.append(".buildbot-sourcedata") 1823 self.ignore_ignores = args.get('ignore_ignores', True) 1824 self.always_purge = args.get('always_purge', False) 1825 1826 self.svn_args = [] 1827 if args.has_key('username'): 1828 self.svn_args.extend(["--username", args['username']]) 1829 if args.has_key('password'): 1830 self.svn_args.extend(["--password", Obfuscated(args['password'], "XXXX")]) 1831 if args.get('extra_args', None) is not None: 1832 self.svn_args.extend(args['extra_args']) 1833 1834 if args.has_key('depth'): 1835 self.svn_args.extend(["--depth",args['depth']])
1836
1837 - def _dovccmd(self, command, args, rootdir=None, cb=None, **kwargs):
1838 if rootdir is None: 1839 rootdir = os.path.join(self.builder.basedir, self.srcdir) 1840 fullCmd = [self.vcexe, command, '--non-interactive', '--no-auth-cache'] 1841 fullCmd.extend(self.svn_args) 1842 fullCmd.extend(args) 1843 c = ShellCommand(self.builder, fullCmd, rootdir, 1844 environ=self.env, sendRC=False, timeout=self.timeout, 1845 maxTime=self.maxTime, usePTY=False, **kwargs) 1846 self.command = c 1847 d = c.start() 1848 if cb: 1849 d.addCallback(self._abandonOnFailure) 1850 d.addCallback(cb) 1851 return d
1852
1853 - def sourcedirIsUpdateable(self):
1854 return os.path.isdir(os.path.join(self.builder.basedir, 1855 self.srcdir, ".svn"))
1856
1857 - def doVCUpdate(self):
1858 if self.sourcedirIsPatched() or self.always_purge: 1859 return self._purgeAndUpdate() 1860 revision = self.args['revision'] or 'HEAD' 1861 # update: possible for mode in ('copy', 'update') 1862 return self._dovccmd('update', ['--revision', str(revision)], 1863 keepStdout=True)
1864
1865 - def doVCFull(self):
1866 revision = self.args['revision'] or 'HEAD' 1867 args = ['--revision', str(revision), self.svnurl, self.srcdir] 1868 if self.mode == "export": 1869 command = 'export' 1870 else: 1871 # mode=='clobber', or copy/update on a broken workspace 1872 command = 'checkout' 1873 return self._dovccmd(command, args, rootdir=self.builder.basedir, 1874 keepStdout=True)
1875
1876 - def _purgeAndUpdate(self):
1877 """svn revert has several corner cases that make it unpractical. 1878 1879 Use the Force instead and delete everything that shows up in status.""" 1880 args = ['--xml'] 1881 if self.ignore_ignores: 1882 args.append('--no-ignore') 1883 return self._dovccmd('status', args, keepStdout=True, sendStdout=False, 1884 cb=self._purgeAndUpdate2)
1885
1886 - def _purgeAndUpdate2(self, res):
1887 """Delete everything that shown up on status.""" 1888 result_xml = parseString(self.command.stdout) 1889 for entry in result_xml.getElementsByTagName('entry'): 1890 filename = entry.getAttribute('path') 1891 if filename in self.keep_on_purge: 1892 continue 1893 filepath = os.path.join(self.builder.basedir, self.workdir, 1894 filename) 1895 self.sendStatus({'stdout': "%s\n" % filepath}) 1896 if os.path.isfile(filepath): 1897 os.chmod(filepath, 0700) 1898 os.remove(filepath) 1899 else: 1900 rmdirRecursive(filepath) 1901 # Now safe to update. 1902 revision = self.args['revision'] or 'HEAD' 1903 return self._dovccmd('update', ['--revision', str(revision)], 1904 keepStdout=True)
1905
1906 - def getSvnVersionCommand(self):
1907 """ 1908 Get the (shell) command used to determine SVN revision number 1909 of checked-out code 1910 1911 return: list of strings, passable as the command argument to ShellCommand 1912 """ 1913 # svn checkout operations finish with 'Checked out revision 16657.' 1914 # svn update operations finish the line 'At revision 16654.' 1915 # But we don't use those. Instead, run 'svnversion'. 1916 svnversion_command = getCommand("svnversion") 1917 # older versions of 'svnversion' (1.1.4) require the WC_PATH 1918 # argument, newer ones (1.3.1) do not. 1919 return [svnversion_command, "."]
1920
1921 - def parseGotRevision(self):
1922 c = ShellCommand(self.builder, 1923 self.getSvnVersionCommand(), 1924 os.path.join(self.builder.basedir, self.srcdir), 1925 environ=self.env, 1926 sendStdout=False, sendStderr=False, sendRC=False, 1927 keepStdout=True, usePTY=False) 1928 d = c.start() 1929 def _parse(res): 1930 r_raw = c.stdout.strip() 1931 # Extract revision from the version "number" string 1932 r = r_raw.rstrip('MS') 1933 r = r.split(':')[-1] 1934 got_version = None 1935 try: 1936 got_version = int(r) 1937 except ValueError: 1938 msg =("SVN.parseGotRevision unable to parse output " 1939 "of svnversion: '%s'" % r_raw) 1940 log.msg(msg) 1941 self.sendStatus({'header': msg + "\n"}) 1942 return got_version
1943 d.addCallback(_parse) 1944 return d
1945 1946 1947 registerSlaveCommand("svn", SVN, command_version) 1948
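# Illustrative note (not part of the original module): 'svnversion' can emit
# a compound string such as "4123:4168MS" for a mixed-revision, locally
# modified working copy.  SVN.parseGotRevision() strips the trailing 'M'/'S'
# flags and keeps the number after the last ':', for example:
#
#   >>> r_raw = "4123:4168MS"
#   >>> int(r_raw.rstrip('MS').split(':')[-1])
#   4168
#
# A plain single-revision checkout ("4168") parses the same way.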
1949 -class Darcs(SourceBase):
1950 """Darcs-specific VC operation. In addition to the arguments 1951 handled by SourceBase, this command reads the following keys: 1952 1953 ['repourl'] (required): the Darcs repository string 1954 """ 1955 1956 header = "darcs operation" 1957
1958 - def setup(self, args):
1959 SourceBase.setup(self, args) 1960 self.vcexe = getCommand("darcs") 1961 self.repourl = args['repourl'] 1962 self.sourcedata = "%s\n" % self.repourl 1963 self.revision = self.args.get('revision')
1964
1965 - def sourcedirIsUpdateable(self):
1966 # checking out a specific revision requires a full 'darcs get' 1967 return (not self.revision and 1968 not self.sourcedirIsPatched() and 1969 os.path.isdir(os.path.join(self.builder.basedir, 1970 self.srcdir, "_darcs")))
1971
1972 - def doVCUpdate(self):
1973 assert not self.revision 1974 # update: possible for mode in ('copy', 'update') 1975 d = os.path.join(self.builder.basedir, self.srcdir) 1976 command = [self.vcexe, 'pull', '--all', '--verbose'] 1977 c = ShellCommand(self.builder, command, d, 1978 sendRC=False, timeout=self.timeout, 1979 maxTime=self.maxTime, usePTY=False) 1980 self.command = c 1981 return c.start()
1982
1983 - def doVCFull(self):
1984 # checkout or export 1985 d = self.builder.basedir 1986 command = [self.vcexe, 'get', '--verbose', '--partial', 1987 '--repo-name', self.srcdir] 1988 if self.revision: 1989 # write the context to a file 1990 n = os.path.join(self.builder.basedir, ".darcs-context") 1991 f = open(n, "wb") 1992 f.write(self.revision) 1993 f.close() 1994 # tell Darcs to use that context 1995 command.append('--context') 1996 command.append(n) 1997 command.append(self.repourl) 1998 1999 c = ShellCommand(self.builder, command, d, 2000 sendRC=False, timeout=self.timeout, 2001 maxTime=self.maxTime, usePTY=False) 2002 self.command = c 2003 d = c.start() 2004 if self.revision: 2005 d.addCallback(self.removeContextFile, n) 2006 return d
2007
2008 - def removeContextFile(self, res, n):
2009 os.unlink(n) 2010 return res
2011
2012 - def parseGotRevision(self):
2013 # we use 'darcs context' to find out what we wound up with 2014 command = [self.vcexe, "changes", "--context"] 2015 c = ShellCommand(self.builder, command, 2016 os.path.join(self.builder.basedir, self.srcdir), 2017 environ=self.env, 2018 sendStdout=False, sendStderr=False, sendRC=False, 2019 keepStdout=True, usePTY=False) 2020 d = c.start() 2021 d.addCallback(lambda res: c.stdout) 2022 return d
2023 2024 registerSlaveCommand("darcs", Darcs, command_version) 2025
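# Illustrative note (not part of the original module): for Darcs the
# 'revision' argument is not a number but a full 'darcs changes --context'
# blob, the same kind of text that parseGotRevision() above reports.  In
# doVCFull() it is written to BASEDIR/.darcs-context and handed back to
# 'darcs get' via --context, which reproduces exactly that set of patches.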
2026 -class Monotone(SourceBase):
2027 """Monotone-specific VC operation. In addition to the arguments handled 2028 by SourceBase, this command reads the following keys: 2029 2030 ['server_addr'] (required): the address of the server to pull from 2031 ['branch'] (required): the branch the revision is on 2032 ['db_path'] (required): the local database path to use 2033 ['revision'] (required): the revision to check out 2034 ['monotone']: (required): path to monotone executable 2035 """ 2036 2037 header = "monotone operation" 2038
2039 - def setup(self, args):
2040 SourceBase.setup(self, args) 2041 self.server_addr = args["server_addr"] 2042 self.branch = args["branch"] 2043 self.db_path = args["db_path"] 2044 self.revision = args["revision"] 2045 self.monotone = args["monotone"] 2046 self._made_fulls = False 2047 self._pull_timeout = args["timeout"]
2048
2049 - def _makefulls(self):
2050 if not self._made_fulls: 2051 basedir = self.builder.basedir 2052 self.full_db_path = os.path.join(basedir, self.db_path) 2053 self.full_srcdir = os.path.join(basedir, self.srcdir) 2054 self._made_fulls = True
2055
2056 - def sourcedirIsUpdateable(self):
2057 self._makefulls() 2058 return (not self.sourcedirIsPatched() and 2059 os.path.isfile(self.full_db_path) and 2060 os.path.isdir(os.path.join(self.full_srcdir, "MT")))
2061
2062 - def doVCUpdate(self):
2063 return self._withFreshDb(self._doUpdate)
2064
2065 - def _doUpdate(self):
2066 # update: possible for mode in ('copy', 'update') 2067 command = [self.monotone, "update", 2068 "-r", self.revision, 2069 "-b", self.branch] 2070 c = ShellCommand(self.builder, command, self.full_srcdir, 2071 sendRC=False, timeout=self.timeout, 2072 maxTime=self.maxTime, usePTY=False) 2073 self.command = c 2074 return c.start()
2075
2076 - def doVCFull(self):
2077 return self._withFreshDb(self._doFull)
2078
2079 - def _doFull(self):
2080 command = [self.monotone, "--db=" + self.full_db_path, 2081 "checkout", 2082 "-r", self.revision, 2083 "-b", self.branch, 2084 self.full_srcdir] 2085 c = ShellCommand(self.builder, command, self.builder.basedir, 2086 sendRC=False, timeout=self.timeout, 2087 maxTime=self.maxTime, usePTY=False) 2088 self.command = c 2089 return c.start()
2090
2091 - def _withFreshDb(self, callback):
2092 self._makefulls() 2093 # first ensure the db exists and is usable 2094 if os.path.isfile(self.full_db_path): 2095 # already exists, so run 'db migrate' in case monotone has been 2096 # upgraded under us 2097 command = [self.monotone, "db", "migrate", 2098 "--db=" + self.full_db_path] 2099 else: 2100 # We'll be doing an initial pull, so up the timeout to 3 hours to 2101 # make sure it will have time to complete. 2102 self._pull_timeout = max(self._pull_timeout, 3 * 60 * 60) 2103 self.sendStatus({"header": "creating database %s\n" 2104 % (self.full_db_path,)}) 2105 command = [self.monotone, "db", "init", 2106 "--db=" + self.full_db_path] 2107 c = ShellCommand(self.builder, command, self.builder.basedir, 2108 sendRC=False, timeout=self.timeout, 2109 maxTime=self.maxTime, usePTY=False) 2110 self.command = c 2111 d = c.start() 2112 d.addCallback(self._abandonOnFailure) 2113 d.addCallback(self._didDbInit) 2114 d.addCallback(self._didPull, callback) 2115 return d
2116
2117 - def _didDbInit(self, res):
2118 command = [self.monotone, "--db=" + self.full_db_path, 2119 "pull", "--ticker=dot", self.server_addr, self.branch] 2120 c = ShellCommand(self.builder, command, self.builder.basedir, 2121 sendRC=False, timeout=self._pull_timeout, 2122 maxTime=self.maxTime, usePTY=False) 2123 self.sendStatus({"header": "pulling %s from %s\n" 2124 % (self.branch, self.server_addr)}) 2125 self.command = c 2126 return c.start()
2127
2128 - def _didPull(self, res, callback):
2129 return callback()
2130 2131 registerSlaveCommand("monotone", Monotone, command_version) 2132 2133
2134 -class Git(SourceBase):
2135 """Git specific VC operation. In addition to the arguments 2136 handled by SourceBase, this command reads the following keys: 2137 2138 ['repourl'] (required): the upstream GIT repository string 2139 ['branch'] (optional): which version (i.e. branch or tag) to 2140 retrieve. Default: "master". 2141 ['submodules'] (optional): whether to initialize and update 2142 submodules. Default: False. 2143 ['ignore_ignores']: ignore ignores when purging changes. 2144 """ 2145 2146 header = "git operation" 2147
2148 - def setup(self, args):
2149 SourceBase.setup(self, args) 2150 self.vcexe = getCommand("git") 2151 self.repourl = args['repourl'] 2152 self.branch = args.get('branch') 2153 if not self.branch: 2154 self.branch = "master" 2155 self.sourcedata = "%s %s\n" % (self.repourl, self.branch) 2156 self.submodules = args.get('submodules') 2157 self.ignore_ignores = args.get('ignore_ignores', True)
2158
2159 - def _fullSrcdir(self):
2160 return os.path.join(self.builder.basedir, self.srcdir)
2161
2162 - def _commitSpec(self):
2163 if self.revision: 2164 return self.revision 2165 return self.branch
2166
2167 - def sourcedirIsUpdateable(self):
2168 return os.path.isdir(os.path.join(self._fullSrcdir(), ".git"))
2169
2170 - def _dovccmd(self, command, cb=None, **kwargs):
2171 c = ShellCommand(self.builder, [self.vcexe] + command, self._fullSrcdir(), 2172 sendRC=False, timeout=self.timeout, 2173 maxTime=self.maxTime, usePTY=False, **kwargs) 2174 self.command = c 2175 d = c.start() 2176 if cb: 2177 d.addCallback(self._abandonOnFailure) 2178 d.addCallback(cb) 2179 return d
2180 2181 # If the repourl matches the sourcedata file, then 2182 # we can say that the sourcedata matches. We can 2183 # ignore branch changes, since Git can work with 2184 # many branches fetched, and we deal with it properly 2185 # in doVCUpdate.
2186 - def sourcedataMatches(self):
2187 try: 2188 olddata = self.readSourcedata() 2189 if not olddata.startswith(self.repourl+' '): 2190 return False 2191 except IOError: 2192 return False 2193 return True
2194
2195 - def _cleanSubmodules(self, res):
2196 command = ['submodule', 'foreach', 'git', 'clean', '-d', '-f'] 2197 if self.ignore_ignores: 2198 command.append('-x') 2199 return self._dovccmd(command)
2200
2201 - def _updateSubmodules(self, res):
2202 return self._dovccmd(['submodule', 'update'], self._cleanSubmodules)
2203
2204 - def _initSubmodules(self, res):
2205 if self.submodules: 2206 return self._dovccmd(['submodule', 'init'], self._updateSubmodules) 2207 else: 2208 return defer.succeed(0)
2209
2210 - def _didHeadCheckout(self, res):
2211 # Rename branch, so that the repo will have the expected branch name 2212 # For further information about this, see the commit message 2213 command = ['branch', '-M', self.branch] 2214 return self._dovccmd(command, self._initSubmodules)
2215
2216 - def _didFetch(self, res):
2217 if self.revision: 2218 head = self.revision 2219 else: 2220 head = 'FETCH_HEAD' 2221 2222 # That is not sufficient. git will leave unversioned files and empty 2223 # directories. Clean them up manually in _didReset. 2224 command = ['reset', '--hard', head] 2225 return self._dovccmd(command, self._didHeadCheckout)
2226 2227 # Update first runs "git clean", removing local changes, 2228 # if the branch to be checked out has changed. This, combined 2229 # with the later "git reset", equates to clobbering the repo, 2230 # but it's much more efficient.
2231 - def doVCUpdate(self):
2232 try: 2233 # Check to see if our branch has changed 2234 diffbranch = self.sourcedata != self.readSourcedata() 2235 except IOError: 2236 diffbranch = False 2237 if diffbranch: 2238 command = ['git', 'clean', '-f', '-d'] 2239 if self.ignore_ignores: 2240 command.append('-x') 2241 c = ShellCommand(self.builder, command, self._fullSrcdir(), 2242 sendRC=False, timeout=self.timeout, usePTY=False) 2243 self.command = c 2244 d = c.start() 2245 d.addCallback(self._abandonOnFailure) 2246 d.addCallback(self._didClean) 2247 return d 2248 return self._didClean(None)
2249
2250 - def _doFetch(self, dummy):
2251 # The plus will make sure the repo is moved to the branch's 2252 # head even if it is not a simple "fast-forward" 2253 command = ['fetch', '-t', self.repourl, '+%s' % self.branch] 2254 self.sendStatus({"header": "fetching branch %s from %s\n" 2255 % (self.branch, self.repourl)}) 2256 return self._dovccmd(command, self._didFetch)
2257
2258 - def _didClean(self, dummy):
2259 # After a clean, try to use the given revision if we have one. 2260 if self.revision: 2261 # We know what revision we want. See if we have it. 2262 d = self._dovccmd(['reset', '--hard', self.revision], 2263 self._initSubmodules) 2264 # If we are unable to reset to the specified version, we 2265 # must do a fetch first and retry. 2266 d.addErrback(self._doFetch) 2267 return d 2268 else: 2269 # No known revision, go grab the latest. 2270 return self._doFetch(None)
2271
2272 - def _didInit(self, res):
2273 return self.doVCUpdate()
2274
2275 - def doVCFull(self):
2276 os.makedirs(self._fullSrcdir()) 2277 return self._dovccmd(['init'], self._didInit)
2278
2279 - def parseGotRevision(self):
2280 command = ['rev-parse', 'HEAD'] 2281 def _parse(res): 2282 hash = self.command.stdout.strip() 2283 if len(hash) != 40: 2284 return None 2285 return hash
2286 return self._dovccmd(command, _parse, keepStdout=True)
2287 2288 registerSlaveCommand("git", Git, command_version) 2289
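# Illustrative note (not part of the original module): Git.parseGotRevision()
# expects 'git rev-parse HEAD' to print a full 40-character hex commit id
# (e.g. the made-up "3f786850e387550fdab836ed7e6dc881de23001b"); any other
# length is treated as unparseable and reported as None.  The sourcedata
# written for Git is "<repourl> <branch>\n", but sourcedataMatches() compares
# only the repourl part, since a branch change is handled by doVCUpdate()
# (clean + fetch + reset) without needing a clobber.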
2290 -class Arch(SourceBase):
2291 """Arch-specific (tla-specific) VC operation. In addition to the 2292 arguments handled by SourceBase, this command reads the following keys: 2293 2294 ['url'] (required): the repository string 2295 ['version'] (required): which version (i.e. branch) to retrieve 2296 ['revision'] (optional): the 'patch-NN' argument to check out 2297 ['archive']: the archive name to use. If None, use the archive's default 2298 ['build-config']: if present, give to 'tla build-config' after checkout 2299 """ 2300 2301 header = "arch operation" 2302 buildconfig = None 2303
2304 - def setup(self, args):
2305 SourceBase.setup(self, args) 2306 self.vcexe = getCommand("tla") 2307 self.archive = args.get('archive') 2308 self.url = args['url'] 2309 self.version = args['version'] 2310 self.revision = args.get('revision') 2311 self.buildconfig = args.get('build-config') 2312 self.sourcedata = "%s\n%s\n%s\n" % (self.url, self.version, 2313 self.buildconfig)
2314
2315 - def sourcedirIsUpdateable(self):
2316 # Arch cannot roll a directory backwards, so if they ask for a 2317 # specific revision, clobber the directory. Technically this 2318 # could be limited to the cases where the requested revision is 2319 # later than our current one, but it's too hard to extract the 2320 # current revision from the tree. 2321 return (not self.revision and 2322 not self.sourcedirIsPatched() and 2323 os.path.isdir(os.path.join(self.builder.basedir, 2324 self.srcdir, "{arch}")))
2325
2326 - def doVCUpdate(self):
2327 # update: possible for mode in ('copy', 'update') 2328 d = os.path.join(self.builder.basedir, self.srcdir) 2329 command = [self.vcexe, 'replay'] 2330 if self.revision: 2331 command.append(self.revision) 2332 c = ShellCommand(self.builder, command, d, 2333 sendRC=False, timeout=self.timeout, 2334 maxTime=self.maxTime, usePTY=False) 2335 self.command = c 2336 return c.start()
2337
2338 - def doVCFull(self):
2339 # to do a checkout, we must first "register" the archive by giving 2340 # the URL to tla, which will go to the repository at that URL and 2341 # figure out the archive name. tla will tell you the archive name 2342 # when it is done, and all further actions must refer to this name. 2343 2344 command = [self.vcexe, 'register-archive', '--force', self.url] 2345 c = ShellCommand(self.builder, command, self.builder.basedir, 2346 sendRC=False, keepStdout=True, timeout=self.timeout, 2347 maxTime=self.maxTime, usePTY=False) 2348 self.command = c 2349 d = c.start() 2350 d.addCallback(self._abandonOnFailure) 2351 d.addCallback(self._didRegister, c) 2352 return d
2353
2354 - def _didRegister(self, res, c):
2355 # find out what tla thinks the archive name is. If the user told us 2356 # to use something specific, make sure it matches. 2357 r = re.search(r'Registering archive: (\S+)\s*$', c.stdout) 2358 if r: 2359 msg = "tla reports archive name is '%s'" % r.group(1) 2360 log.msg(msg) 2361 self.builder.sendUpdate({'header': msg+"\n"}) 2362 if self.archive and r.group(1) != self.archive: 2363 msg = (" mismatch, we wanted an archive named '%s'" 2364 % self.archive) 2365 log.msg(msg) 2366 self.builder.sendUpdate({'header': msg+"\n"}) 2367 raise AbandonChain(-1) 2368 self.archive = r.group(1) 2369 assert self.archive, "need archive name to continue" 2370 return self._doGet()
2371
2372 - def _doGet(self):
2373 ver = self.version 2374 if self.revision: 2375 ver += "--%s" % self.revision 2376 command = [self.vcexe, 'get', '--archive', self.archive, 2377 '--no-pristine', 2378 ver, self.srcdir] 2379 c = ShellCommand(self.builder, command, self.builder.basedir, 2380 sendRC=False, timeout=self.timeout, 2381 maxTime=self.maxTime, usePTY=False) 2382 self.command = c 2383 d = c.start() 2384 d.addCallback(self._abandonOnFailure) 2385 if self.buildconfig: 2386 d.addCallback(self._didGet) 2387 return d
2388
2389 - def _didGet(self, res):
2390 d = os.path.join(self.builder.basedir, self.srcdir) 2391 command = [self.vcexe, 'build-config', self.buildconfig] 2392 c = ShellCommand(self.builder, command, d, 2393 sendRC=False, timeout=self.timeout, 2394 maxTime=self.maxTime, usePTY=False) 2395 self.command = c 2396 d = c.start() 2397 d.addCallback(self._abandonOnFailure) 2398 return d
2399
2400 - def parseGotRevision(self):
2401 # using code from tryclient.TlaExtractor 2402 # 'tla logs --full' gives us ARCHIVE/BRANCH--REVISION 2403 # 'tla logs' gives us REVISION 2404 command = [self.vcexe, "logs", "--full", "--reverse"] 2405 c = ShellCommand(self.builder, command, 2406 os.path.join(self.builder.basedir, self.srcdir), 2407 environ=self.env, 2408 sendStdout=False, sendStderr=False, sendRC=False, 2409 keepStdout=True, usePTY=False) 2410 d = c.start() 2411 def _parse(res): 2412 tid = c.stdout.split("\n")[0].strip() 2413 slash = tid.index("/") 2414 dd = tid.rindex("--") 2415 #branch = tid[slash+1:dd] 2416 baserev = tid[dd+2:] 2417 return baserev
2418 d.addCallback(_parse) 2419 return d
2420 2421 registerSlaveCommand("arch", Arch, command_version) 2422
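# Illustrative note (not part of the original module): with hypothetical
# values, a line of 'tla logs --full --reverse' output looks something like
#
#   demo@example.com--archives/proj--main--1.0--patch-7
#
# Arch.parseGotRevision() takes the first line and keeps everything after the
# last "--", so the revision reported to the master would be "patch-7" (or
# "base-0" for the initial import).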
2423 -class Bazaar(Arch):
2424 """Bazaar (/usr/bin/baz) is an alternative client for Arch repositories. 2425 It is mostly option-compatible, but archive registration is different 2426 enough to warrant a separate Command. 2427 2428 ['archive'] (required): the name of the archive being used 2429 """ 2430
2431 - def setup(self, args):
2432 Arch.setup(self, args) 2433 self.vcexe = getCommand("baz") 2434 # baz doesn't emit the repository name after registration (and 2435 # grepping through the output of 'baz archives' is too hard), so we 2436 # require the buildmaster configuration to provide both the 2437 # archive name and the URL. 2438 self.archive = args['archive'] # required for Baz 2439 self.sourcedata = "%s\n%s\n%s\n" % (self.url, self.version, 2440 self.buildconfig)
2441 2442 # in _didRegister, the regexp won't match, so we'll stick with the name 2443 # in self.archive 2444
2445 - def _doGet(self):
2446 # baz prefers ARCHIVE/VERSION. This will work even if 2447 # my-default-archive is not set. 2448 ver = self.archive + "/" + self.version 2449 if self.revision: 2450 ver += "--%s" % self.revision 2451 command = [self.vcexe, 'get', '--no-pristine', 2452 ver, self.srcdir] 2453 c = ShellCommand(self.builder, command, self.builder.basedir, 2454 sendRC=False, timeout=self.timeout, 2455 maxTime=self.maxTime, usePTY=False) 2456 self.command = c 2457 d = c.start() 2458 d.addCallback(self._abandonOnFailure) 2459 if self.buildconfig: 2460 d.addCallback(self._didGet) 2461 return d
2462
2463 - def parseGotRevision(self):
2464 # using code from tryclient.BazExtractor 2465 command = [self.vcexe, "tree-id"] 2466 c = ShellCommand(self.builder, command, 2467 os.path.join(self.builder.basedir, self.srcdir), 2468 environ=self.env, 2469 sendStdout=False, sendStderr=False, sendRC=False, 2470 keepStdout=True, usePTY=False) 2471 d = c.start() 2472 def _parse(res): 2473 tid = c.stdout.strip() 2474 slash = tid.index("/") 2475 dd = tid.rindex("--") 2476 #branch = tid[slash+1:dd] 2477 baserev = tid[dd+2:] 2478 return baserev
2479 d.addCallback(_parse) 2480 return d
2481 2482 registerSlaveCommand("bazaar", Bazaar, command_version) 2483 2484
2485 -class Bzr(SourceBase):
2486 """bzr-specific VC operation. In addition to the arguments 2487 handled by SourceBase, this command reads the following keys: 2488 2489 ['repourl'] (required): the Bzr repository string 2490 """ 2491 2492 header = "bzr operation" 2493
2494 - def setup(self, args):
2495 SourceBase.setup(self, args) 2496 self.vcexe = getCommand("bzr") 2497 self.repourl = args['repourl'] 2498 self.sourcedata = "%s\n" % self.repourl 2499 self.revision = self.args.get('revision') 2500 self.forceSharedRepo = args.get('forceSharedRepo')
2501
2502 - def sourcedirIsUpdateable(self):
2503 # checking out a specific revision requires a full 'bzr checkout' 2504 return (not self.revision and 2505 not self.sourcedirIsPatched() and 2506 os.path.isdir(os.path.join(self.builder.basedir, 2507 self.srcdir, ".bzr")))
2508
2509 - def start(self):
2510 def cont(res): 2511 # Continue with start() method in superclass. 2512 return SourceBase.start(self)
2513 2514 if self.forceSharedRepo: 2515 d = self.doForceSharedRepo() 2516 d.addCallback(cont) 2517 return d 2518 else: 2519 return cont(None)
2520
2521 - def doVCUpdate(self):
2522 assert not self.revision 2523 # update: possible for mode in ('copy', 'update') 2524 srcdir = os.path.join(self.builder.basedir, self.srcdir) 2525 command = [self.vcexe, 'update'] 2526 c = ShellCommand(self.builder, command, srcdir, 2527 sendRC=False, timeout=self.timeout, 2528 maxTime=self.maxTime, usePTY=False) 2529 self.command = c 2530 return c.start()
2531
2532 - def doVCFull(self):
2533 # checkout or export 2534 d = self.builder.basedir 2535 if self.mode == "export": 2536 # exporting in bzr requires a separate directory 2537 return self.doVCExport() 2538 # originally I added --lightweight here, but then 'bzr revno' is 2539 # wrong. The revno reported in 'bzr version-info' is correct, 2540 # however. Maybe this is a bzr bug? 2541 # 2542 # In addition, you cannot perform a 'bzr update' on a repo pulled 2543 # from an HTTP repository that used 'bzr checkout --lightweight'. You 2544 # get a "ERROR: Cannot lock: transport is read only" when you try. 2545 # 2546 # So I won't bother using --lightweight for now. 2547 2548 command = [self.vcexe, 'checkout'] 2549 if self.revision: 2550 command.append('--revision') 2551 command.append(str(self.revision)) 2552 command.append(self.repourl) 2553 command.append(self.srcdir) 2554 2555 c = ShellCommand(self.builder, command, d, 2556 sendRC=False, timeout=self.timeout, 2557 maxTime=self.maxTime, usePTY=False) 2558 self.command = c 2559 d = c.start() 2560 return d
2561
2562 - def doVCExport(self):
2563 tmpdir = os.path.join(self.builder.basedir, "export-temp") 2564 srcdir = os.path.join(self.builder.basedir, self.srcdir) 2565 command = [self.vcexe, 'checkout', '--lightweight'] 2566 if self.revision: 2567 command.append('--revision') 2568 command.append(str(self.revision)) 2569 command.append(self.repourl) 2570 command.append(tmpdir) 2571 c = ShellCommand(self.builder, command, self.builder.basedir, 2572 sendRC=False, timeout=self.timeout, 2573 maxTime=self.maxTime, usePTY=False) 2574 self.command = c 2575 d = c.start() 2576 def _export(res): 2577 command = [self.vcexe, 'export', srcdir] 2578 c = ShellCommand(self.builder, command, tmpdir, 2579 sendRC=False, timeout=self.timeout, 2580 maxTime=self.maxTime, usePTY=False) 2581 self.command = c 2582 return c.start()
2583 d.addCallback(_export) 2584 return d 2585
2586 - def doForceSharedRepo(self):
2587 # Don't send stderr. When there is no shared repo, this might confuse 2588 # users, as they will see a bzr error message. But having no shared 2589 # repo is not an error, just an indication that we need to make one. 2590 c = ShellCommand(self.builder, [self.vcexe, 'info', '.'], 2591 self.builder.basedir, 2592 sendStderr=False, sendRC=False, usePTY=False) 2593 d = c.start() 2594 def afterCheckSharedRepo(res): 2595 if type(res) is int and res != 0: 2596 log.msg("No shared repo found, creating it") 2597 # bzr info fails, try to create shared repo. 2598 c = ShellCommand(self.builder, [self.vcexe, 'init-repo', '.'], 2599 self.builder.basedir, 2600 sendRC=False, usePTY=False) 2601 self.command = c 2602 return c.start() 2603 else: 2604 return defer.succeed(res)
2605 d.addCallback(afterCheckSharedRepo) 2606 return d 2607
2608 - def get_revision_number(self, out):
2609 # it feels like 'bzr revno' sometimes gives different results than 2610 # the 'revno:' line from 'bzr version-info', and the one from 2611 # version-info is more likely to be correct. 2612 for line in out.split("\n"): 2613 colon = line.find(":") 2614 if colon != -1: 2615 key, value = line[:colon], line[colon+2:] 2616 if key == "revno": 2617 return int(value) 2618 raise ValueError("unable to find revno: in bzr output: '%s'" % out)
2619
2620 - def parseGotRevision(self):
2621 command = [self.vcexe, "version-info"] 2622 c = ShellCommand(self.builder, command, 2623 os.path.join(self.builder.basedir, self.srcdir), 2624 environ=self.env, 2625 sendStdout=False, sendStderr=False, sendRC=False, 2626 keepStdout=True, usePTY=False) 2627 d = c.start() 2628 def _parse(res): 2629 try: 2630 return self.get_revision_number(c.stdout) 2631 except ValueError: 2632 msg =("Bzr.parseGotRevision unable to parse output " 2633 "of bzr version-info: '%s'" % c.stdout.strip()) 2634 log.msg(msg) 2635 self.sendStatus({'header': msg + "\n"}) 2636 return None
2637 d.addCallback(_parse) 2638 return d 2639 2640 registerSlaveCommand("bzr", Bzr, command_version) 2641
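# Illustrative note (not part of the original module): 'bzr version-info'
# prints "key: value" lines; hypothetical output might include
#
#   revision-id: someone@example.org-20080401123456-abcdef0123456789
#   date: 2008-04-01 12:34:56 +0000
#   revno: 1234
#
# get_revision_number() scans for the line whose key is "revno" and returns
# int("1234"); parseGotRevision() reports None when no such line is found.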
2642 -class Mercurial(SourceBase):
2643 """Mercurial specific VC operation. In addition to the arguments 2644 handled by SourceBase, this command reads the following keys: 2645 2646 ['repourl'] (required): the Mercurial repository string 2647 ['clobberOnBranchChange']: Document me. See ticket #462. 2648 """ 2649 2650 header = "mercurial operation" 2651
2652 - def setup(self, args):
2653 SourceBase.setup(self, args) 2654 self.vcexe = getCommand("hg") 2655 self.repourl = args['repourl'] 2656 self.clobberOnBranchChange = args.get('clobberOnBranchChange', True) 2657 self.sourcedata = "%s\n" % self.repourl 2658 self.branchType = args.get('branchType', 'dirname') 2659 self.stdout = "" 2660 self.stderr = ""
2661
2662 - def sourcedirIsUpdateable(self):
2663 return os.path.isdir(os.path.join(self.builder.basedir, 2664 self.srcdir, ".hg"))
2665
2666 - def doVCUpdate(self):
2667 d = os.path.join(self.builder.basedir, self.srcdir) 2668 command = [self.vcexe, 'pull', '--verbose', self.repourl] 2669 c = ShellCommand(self.builder, command, d, 2670 sendRC=False, timeout=self.timeout, 2671 maxTime=self.maxTime, keepStdout=True, usePTY=False) 2672 self.command = c 2673 d = c.start() 2674 d.addCallback(self._handleEmptyUpdate) 2675 d.addCallback(self._update) 2676 return d
2677
2678 - def _handleEmptyUpdate(self, res):
2679 if type(res) is int and res == 1: 2680 if self.command.stdout.find("no changes found") != -1: 2681 # 'hg pull', when it doesn't have anything to do, exits with 2682 # rc=1, and there appears to be no way to shut this off. It 2683 # emits a distinctive message to stdout, though. So catch 2684 # this and pretend that it completed successfully. 2685 return 0 2686 return res
2687
2688 - def doVCFull(self):
2689 d = os.path.join(self.builder.basedir, self.srcdir) 2690 command = [self.vcexe, 'clone', '--verbose', '--noupdate'] 2691 2692 # if we got a revision, the mode is 'clobber', and branches are dirname-based, clone only up to that revision 2693 # (otherwise, do a full clone so the .hg dir can be re-used by subsequent builds) 2694 if self.args.get('revision') and self.mode == 'clobber' and self.branchType == 'dirname': 2695 command.extend(['--rev', self.args.get('revision')]) 2696 command.extend([self.repourl, d]) 2697 2698 c = ShellCommand(self.builder, command, self.builder.basedir, 2699 sendRC=False, timeout=self.timeout, 2700 maxTime=self.maxTime, usePTY=False) 2701 self.command = c 2702 cmd1 = c.start() 2703 cmd1.addCallback(self._update) 2704 return cmd1
2705
2706 - def _clobber(self, dummy, dirname):
2707 def _vcfull(res): 2708 return self.doVCFull()
2709 2710 c = self.doClobber(dummy, dirname) 2711 c.addCallback(_vcfull) 2712 2713 return c
2714
2715 - def _purge(self, dummy, dirname):
2716 d = os.path.join(self.builder.basedir, self.srcdir) 2717 purge = [self.vcexe, 'purge', '--all'] 2718 purgeCmd = ShellCommand(self.builder, purge, d, 2719 sendStdout=False, sendStderr=False, 2720 keepStdout=True, keepStderr=True, usePTY=False) 2721 2722 def _clobber(res): 2723 if res != 0: 2724 # purge failed, we need to switch to a classic clobber 2725 msg = "'hg purge' failed: %s\n%s. Clobbering." % (purgeCmd.stdout, purgeCmd.stderr) 2726 self.sendStatus({'header': msg + "\n"}) 2727 log.msg(msg) 2728 2729 return self._clobber(dummy, dirname) 2730 2731 # Purge was a success, then we need to update 2732 return self._update2(res)
2733 2734 p = purgeCmd.start() 2735 p.addCallback(_clobber) 2736 return p 2737
2738 - def _update(self, res):
2739 if res != 0: 2740 return res 2741 2742 # compare current branch to update 2743 self.update_branch = self.args.get('branch', 'default') 2744 2745 d = os.path.join(self.builder.basedir, self.srcdir) 2746 parentscmd = [self.vcexe, 'identify', '--num', '--branch'] 2747 cmd = ShellCommand(self.builder, parentscmd, d, 2748 sendStdout=False, sendStderr=False, 2749 keepStdout=True, keepStderr=True, usePTY=False) 2750 2751 self.clobber = None 2752 2753 def _parseIdentify(res): 2754 if res != 0: 2755 msg = "'hg identify' failed: %s\n%s" % (cmd.stdout, cmd.stderr) 2756 self.sendStatus({'header': msg + "\n"}) 2757 log.msg(msg) 2758 return res 2759 2760 log.msg('Output: %s' % cmd.stdout) 2761 2762 match = re.search(r'^(.+) (.+)$', cmd.stdout) 2763 assert match 2764 2765 rev = match.group(1) 2766 current_branch = match.group(2) 2767 2768 if rev == '-1': 2769 msg = "Fresh hg repo, don't worry about in-repo branch name" 2770 log.msg(msg) 2771 2772 elif self.sourcedirIsPatched(): 2773 self.clobber = self._purge 2774 2775 elif self.update_branch != current_branch: 2776 msg = "Working dir is on in-repo branch '%s' and build needs '%s'." % (current_branch, self.update_branch) 2777 if self.clobberOnBranchChange: 2778 msg += ' Clobbering.' 2779 else: 2780 msg += ' Updating.' 2781 2782 self.sendStatus({'header': msg + "\n"}) 2783 log.msg(msg) 2784 2785 # Clobbers only if clobberOnBranchChange is set 2786 if self.clobberOnBranchChange: 2787 self.clobber = self._purge 2788 2789 else: 2790 msg = "Working dir on same in-repo branch as build (%s)." % (current_branch) 2791 log.msg(msg) 2792 2793 return 0
2794 2795 def _checkRepoURL(res): 2796 parentscmd = [self.vcexe, 'paths', 'default'] 2797 cmd2 = ShellCommand(self.builder, parentscmd, d, 2798 sendStdout=False, sendStderr=False, 2799 keepStdout=True, keepStderr=True, usePTY=False) 2800 2801 def _parseRepoURL(res): 2802 if res == 1: 2803 if "not found!" == cmd2.stderr.strip(): 2804 msg = "hg default path not set. Not checking repo url for clobber test" 2805 log.msg(msg) 2806 return 0 2807 else: 2808 msg = "'hg paths default' failed: %s\n%s" % (cmd2.stdout, cmd2.stderr) 2809 log.msg(msg) 2810 return 1 2811 2812 oldurl = cmd2.stdout.strip() 2813 2814 log.msg("Repo cloned from: '%s'" % oldurl) 2815 2816 if sys.platform == "win32": 2817 oldurl = oldurl.lower().replace('\\', '/') 2818 repourl = self.repourl.lower().replace('\\', '/') 2819 if repourl.startswith('file://'): 2820 repourl = repourl.split('file://')[1] 2821 else: 2822 repourl = self.repourl 2823 2824 oldurl = remove_userpassword(oldurl) 2825 repourl = remove_userpassword(repourl) 2826 2827 if oldurl != repourl: 2828 self.clobber = self._clobber 2829 msg = "RepoURL changed from '%s' in wc to '%s' in update. Clobbering" % (oldurl, repourl) 2830 log.msg(msg) 2831 2832 return 0 2833 2834 c = cmd2.start() 2835 c.addCallback(_parseRepoURL) 2836 return c 2837 2838 def _maybeClobber(res): 2839 if self.clobber: 2840 msg = "Clobber flag set. Doing clobbering" 2841 log.msg(msg) 2842 2843 def _vcfull(res): 2844 return self.doVCFull() 2845 2846 return self.clobber(None, self.srcdir) 2847 2848 return 0 2849 2850 c = cmd.start() 2851 c.addCallback(_parseIdentify) 2852 c.addCallback(_checkRepoURL) 2853 c.addCallback(_maybeClobber) 2854 c.addCallback(self._update2) 2855 return c 2856
2857 - def _update2(self, res):
2858 d = os.path.join(self.builder.basedir, self.srcdir) 2859 2860 updatecmd=[self.vcexe, 'update', '--clean', '--repository', d] 2861 if self.args.get('revision'): 2862 updatecmd.extend(['--rev', self.args['revision']]) 2863 else: 2864 updatecmd.extend(['--rev', self.args.get('branch', 'default')]) 2865 self.command = ShellCommand(self.builder, updatecmd, 2866 self.builder.basedir, sendRC=False, 2867 timeout=self.timeout, maxTime=self.maxTime, usePTY=False) 2868 return self.command.start()
2869
2870 - def parseGotRevision(self):
2871 # we use 'hg identify' to find out what we wound up with 2872 command = [self.vcexe, "identify"] 2873 c = ShellCommand(self.builder, command, 2874 os.path.join(self.builder.basedir, self.srcdir), 2875 environ=self.env, 2876 sendStdout=False, sendStderr=False, sendRC=False, 2877 keepStdout=True, usePTY=False) 2878 d = c.start() 2879 def _parse(res): 2880 m = re.search(r'^(\w+)', c.stdout) 2881 return m.group(1)
2882 d.addCallback(_parse) 2883 return d 2884 2885 registerSlaveCommand("hg", Mercurial, command_version) 2886 2887
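# Illustrative note (not part of the original module): Mercurial._update()
# runs 'hg identify --num --branch', whose output is "<local-rev> <branch>",
# e.g. a hypothetical "42 default"; a brand-new clone identifies its working
# directory as revision -1, which is why rev == '-1' skips the branch
# comparison entirely.  parseGotRevision() runs plain 'hg identify' and keeps
# only the leading changeset hash, e.g. "a1b2c3d4e5f6" from a hypothetical
# "a1b2c3d4e5f6 tip".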
2888 -class P4Base(SourceBase):
2889 """Base class for P4 source-updaters 2890 2891 ['p4port'] (required): host:port for server to access 2892 ['p4user'] (optional): user to use for access 2893 ['p4passwd'] (optional): passwd to try for the user 2894 ['p4client'] (optional): client spec to use 2895 """
2896 - def setup(self, args):
2897 SourceBase.setup(self, args) 2898 self.p4port = args['p4port'] 2899 self.p4client = args['p4client'] 2900 self.p4user = args['p4user'] 2901 self.p4passwd = args['p4passwd']
2902
2903 - def parseGotRevision(self):
2904 # Executes a p4 command that will give us the latest changelist number 2905 # of any file under the current (or default) client: 2906 command = ['p4'] 2907 if self.p4port: 2908 command.extend(['-p', self.p4port]) 2909 if self.p4user: 2910 command.extend(['-u', self.p4user]) 2911 if self.p4passwd: 2912 command.extend(['-P', self.p4passwd]) 2913 if self.p4client: 2914 command.extend(['-c', self.p4client]) 2915 # add '-s submitted' for bug #626 2916 command.extend(['changes', '-s', 'submitted', '-m', '1', '#have']) 2917 c = ShellCommand(self.builder, command, self.builder.basedir, 2918 environ=self.env, timeout=self.timeout, 2919 maxTime=self.maxTime, sendStdout=True, 2920 sendStderr=False, sendRC=False, keepStdout=True, 2921 usePTY=False) 2922 self.command = c 2923 d = c.start() 2924 2925 def _parse(res): 2926 # 'p4 -c client-name changes -s submitted -m 1 "#have"' will produce output like: 2927 # "Change 28147 on 2008/04/07 by p4user@hostname..." 2928 # The number after "Change" is the one we want. 2929 m = re.match('Change\s+(\d+)\s+', c.stdout) 2930 if m: 2931 return m.group(1) 2932 return None
2933 d.addCallback(_parse) 2934 return d
2935 2936
2937 -class P4(P4Base):
2938 """A P4 source-updater. 2939 2940 ['p4port'] (required): host:port for server to access 2941 ['p4user'] (optional): user to use for access 2942 ['p4passwd'] (optional): passwd to try for the user 2943 ['p4client'] (optional): client spec to use 2944 ['p4extra_views'] (optional): additional client views to use 2945 """ 2946 2947 header = "p4" 2948
2949 - def setup(self, args):
2950 P4Base.setup(self, args) 2951 self.p4base = args['p4base'] 2952 self.p4extra_views = args['p4extra_views'] 2953 self.p4mode = args['mode'] 2954 self.p4branch = args['branch'] 2955 2956 self.sourcedata = str([ 2957 # Perforce server. 2958 self.p4port, 2959 2960 # Client spec. 2961 self.p4client, 2962 2963 # Depot side of view spec. 2964 self.p4base, 2965 self.p4branch, 2966 self.p4extra_views, 2967 2968 # Local side of view spec (srcdir is made from these). 2969 self.builder.basedir, 2970 self.mode, 2971 self.workdir 2972 ])
2973 2974
2975 - def sourcedirIsUpdateable(self):
2976 # We assume our client spec is still around. 2977 # We just say we aren't updateable if the dir doesn't exist so we 2978 # don't get ENOENT checking the sourcedata. 2979 return (not self.sourcedirIsPatched() and 2980 os.path.isdir(os.path.join(self.builder.basedir, 2981 self.srcdir)))
2982
2983 - def doVCUpdate(self):
2984 return self._doP4Sync(force=False)
2985
2986 - def _doP4Sync(self, force):
2987 command = ['p4'] 2988 2989 if self.p4port: 2990 command.extend(['-p', self.p4port]) 2991 if self.p4user: 2992 command.extend(['-u', self.p4user]) 2993 if self.p4passwd: 2994 command.extend(['-P', self.p4passwd]) 2995 if self.p4client: 2996 command.extend(['-c', self.p4client]) 2997 command.extend(['sync']) 2998 if force: 2999 command.extend(['-f']) 3000 if self.revision: 3001 command.extend(['@' + str(self.revision)]) 3002 env = {} 3003 c = ShellCommand(self.builder, command, self.builder.basedir, 3004 environ=env, sendRC=False, timeout=self.timeout, 3005 maxTime=self.maxTime, keepStdout=True, usePTY=False) 3006 self.command = c 3007 d = c.start() 3008 d.addCallback(self._abandonOnFailure) 3009 return d
3010 3011
3012 - def doVCFull(self):
3013 env = {} 3014 command = ['p4'] 3015 client_spec = '' 3016 client_spec += "Client: %s\n\n" % self.p4client 3017 client_spec += "Owner: %s\n\n" % self.p4user 3018 client_spec += "Description:\n\tCreated by %s\n\n" % self.p4user 3019 client_spec += "Root:\t%s\n\n" % self.builder.basedir 3020 client_spec += "Options:\tallwrite rmdir\n\n" 3021 client_spec += "LineEnd:\tlocal\n\n" 3022 3023 # Setup a view 3024 client_spec += "View:\n\t%s" % (self.p4base) 3025 if self.p4branch: 3026 client_spec += "%s/" % (self.p4branch) 3027 client_spec += "... //%s/%s/...\n" % (self.p4client, self.srcdir) 3028 if self.p4extra_views: 3029 for k, v in self.p4extra_views: 3030 client_spec += "\t%s/... //%s/%s%s/...\n" % (k, self.p4client, 3031 self.srcdir, v) 3032 if self.p4port: 3033 command.extend(['-p', self.p4port]) 3034 if self.p4user: 3035 command.extend(['-u', self.p4user]) 3036 if self.p4passwd: 3037 command.extend(['-P', self.p4passwd]) 3038 command.extend(['client', '-i']) 3039 log.msg(client_spec) 3040 c = ShellCommand(self.builder, command, self.builder.basedir, 3041 environ=env, sendRC=False, timeout=self.timeout, 3042 maxTime=self.maxTime, initialStdin=client_spec, 3043 usePTY=False) 3044 self.command = c 3045 d = c.start() 3046 d.addCallback(self._abandonOnFailure) 3047 d.addCallback(lambda _: self._doP4Sync(force=True)) 3048 return d
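# Illustrative example (not part of the original module): with hypothetical
# values p4base='//depot/proj/' (note the trailing slash), p4branch='trunk',
# p4client='buildbot-slave1', srcdir='build', and no extra views, the client
# spec built in doVCFull() ends with a View section like
#
#   View:
#           //depot/proj/trunk/... //buildbot-slave1/build/...
#
# The whole spec is fed to 'p4 client -i' on stdin (initialStdin) to create
# or update the client before the forced 'p4 sync -f'.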
3049
3050 - def parseGotRevision(self):
3051 rv = None 3052 if self.revision: 3053 rv = str(self.revision) 3054 return rv
3055 3056 registerSlaveCommand("p4", P4, command_version) 3057 3058
3059 -class P4Sync(P4Base):
3060 """A partial P4 source-updater. Requires manual setup of a per-slave P4 3061 environment. The only thing which comes from the master is P4PORT. 3062 'mode' is required to be 'copy'. 3063 3064 ['p4port'] (required): host:port for server to access 3065 ['p4user'] (optional): user to use for access 3066 ['p4passwd'] (optional): passwd to try for the user 3067 ['p4client'] (optional): client spec to use 3068 """ 3069 3070 header = "p4 sync" 3071
3072 - def setup(self, args):
3073 P4Base.setup(self, args) 3074 self.vcexe = getCommand("p4")
3075
3076 - def sourcedirIsUpdateable(self):
3077 return True
3078
3079 - def _doVC(self, force):
3080 d = os.path.join(self.builder.basedir, self.srcdir) 3081 command = [self.vcexe] 3082 if self.p4port: 3083 command.extend(['-p', self.p4port]) 3084 if self.p4user: 3085 command.extend(['-u', self.p4user]) 3086 if self.p4passwd: 3087 command.extend(['-P', self.p4passwd]) 3088 if self.p4client: 3089 command.extend(['-c', self.p4client]) 3090 command.extend(['sync']) 3091 if force: 3092 command.extend(['-f']) 3093 if self.revision: 3094 command.extend(['@' + self.revision]) 3095 env = {} 3096 c = ShellCommand(self.builder, command, d, environ=env, 3097 sendRC=False, timeout=self.timeout, 3098 maxTime=self.maxTime, usePTY=False) 3099 self.command = c 3100 return c.start()
3101
3102 - def doVCUpdate(self):
3103 return self._doVC(force=False)
3104
3105 - def doVCFull(self):
3106 return self._doVC(force=True)
3107
3108 - def parseGotRevision(self):
3109 rv = None 3110 if self.revision: 3111 rv = str(self.revision) 3112 return rv
3113 3114 registerSlaveCommand("p4sync", P4Sync, command_version) 3115