
Source Code for Module buildslave.runprocess

# This file is part of Buildbot.  Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members

"""
Support for running 'shell commands'
"""

import os
import signal
import types
import re
import traceback
import stat
from collections import deque

from twisted.python import runtime, log
from twisted.internet import reactor, defer, protocol, task, error

from buildslave import util
from buildslave.exceptions import AbandonChain

def shell_quote(cmd_list):
    # attempt to quote cmd_list such that a shell will properly re-interpret
    # it.  The pipes module is only available on UNIX, and Windows "shell"
    # quoting is indescribably convoluted - so much so that it's not clear
    # it's reversible.  Also, the quote function is undocumented (although it
    # looks like it will be documented soon: http://bugs.python.org/issue9723).
    # Finally, it has a nasty bug in some versions where an empty string is
    # not quoted.
    #
    # So:
    #  - use pipes.quote on UNIX, handling '' as a special case
    #  - use Python's repr() on Windows, as a best effort
    if runtime.platformType == 'win32':
        return " ".join([ `e` for e in cmd_list ])
    else:
        import pipes
        def quote(e):
            if not e:
                return '""'
            return pipes.quote(e)
        return " ".join([ quote(e) for e in cmd_list ])
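
# Illustrative sketch (not part of the original module): what shell_quote
# produces on a POSIX slave, where the stdlib pipes module is used.
# Whitespace-bearing arguments come back single-quoted, and the empty string
# is special-cased to '""'.
def _example_shell_quote():
    quoted = shell_quote(['echo', 'hello world', ''])
    assert quoted == 'echo \'hello world\' ""'
    return quoted
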
class LogFileWatcher:
    POLL_INTERVAL = 2

    def __init__(self, command, name, logfile, follow=False):
        self.command = command
        self.name = name
        self.logfile = logfile

        log.msg("LogFileWatcher created to watch %s" % logfile)
        # we are created before the ShellCommand starts. If the logfile we're
        # supposed to be watching already exists, record its size and
        # ctime/mtime so we can tell when it starts to change.
        self.old_logfile_stats = self.statFile()
        self.started = False

        # follow the file, only sending back lines
        # added since we started watching
        self.follow = follow

        # every 2 seconds we check on the file again
        self.poller = task.LoopingCall(self.poll)

    def start(self):
        self.poller.start(self.POLL_INTERVAL).addErrback(self._cleanupPoll)

    def _cleanupPoll(self, err):
        log.err(err, msg="Polling error")
        self.poller = None

    def stop(self):
        self.poll()
        if self.poller is not None:
            self.poller.stop()
        if self.started:
            self.f.close()

    def statFile(self):
        if os.path.exists(self.logfile):
            s = os.stat(self.logfile)
            return (s[stat.ST_CTIME], s[stat.ST_MTIME], s[stat.ST_SIZE])
        return None

    def poll(self):
        if not self.started:
            s = self.statFile()
            if s == self.old_logfile_stats:
                return  # not started yet
            if not s:
                # the file was there, but now it's deleted. Forget about the
                # initial state, clearly the process has deleted the logfile
                # in preparation for creating a new one.
                self.old_logfile_stats = None
                return  # no file to work with
            self.f = open(self.logfile, "rb")
            # if we only want new lines, seek to
            # where we stat'd so we only find new
            # lines
            if self.follow:
                self.f.seek(s[2], 0)
            self.started = True
        self.f.seek(self.f.tell(), 0)
        while True:
            data = self.f.read(10000)
            if not data:
                return
            self.command.addLogfile(self.name, data)

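
# Illustrative sketch (not part of the original module): LogFileWatcher only
# needs its "command" to provide addLogfile(name, data), which is where the
# polled file contents are delivered.  The stub class and paths below are
# hypothetical; in real use the command is a RunProcess instance and the
# watcher's LoopingCall is driven by the running reactor.
def _example_logfile_watcher():
    class _StubCommand:
        def addLogfile(self, name, data):
            log.msg("got %d bytes for log %r" % (len(data), name))
    watcher = LogFileWatcher(_StubCommand(), "test.log", "/tmp/test.log",
                             follow=True)
    watcher.start()  # polls every POLL_INTERVAL seconds while the reactor runs
    return watcher
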
class RunProcessPP(protocol.ProcessProtocol):
    debug = False

    def __init__(self, command):
        self.command = command
        self.pending_stdin = ""
        self.stdin_finished = False
        self.killed = False

    def writeStdin(self, data):
        assert not self.stdin_finished
        if self.connected:
            self.transport.write(data)
        else:
            self.pending_stdin += data

    def closeStdin(self):
        if self.connected:
            if self.debug: log.msg(" closing stdin")
            self.transport.closeStdin()
        self.stdin_finished = True

    def connectionMade(self):
        if self.debug:
            log.msg("RunProcessPP.connectionMade")
        if not self.command.process:
            if self.debug:
                log.msg(" assigning self.command.process: %s" %
                        (self.transport,))
            self.command.process = self.transport

        # TODO: maybe we shouldn't close stdin when using a PTY. I can't test
        # this yet, recent debian glibc has a bug which causes thread-using
        # test cases to SIGHUP trial, and the workaround is to either run
        # the whole test with /bin/sh -c " ".join(argv) (way gross) or to
        # not use a PTY. Once the bug is fixed, I'll be able to test what
        # happens when you close stdin on a pty. My concern is that it will
        # SIGHUP the child (since we are, in a sense, hanging up on them).
        # But it may well be that keeping stdout open prevents the SIGHUP
        # from being sent.
        #if not self.command.usePTY:

        if self.pending_stdin:
            if self.debug: log.msg(" writing to stdin")
            self.transport.write(self.pending_stdin)
        if self.stdin_finished:
            if self.debug: log.msg(" closing stdin")
            self.transport.closeStdin()

    def outReceived(self, data):
        if self.debug:
            log.msg("RunProcessPP.outReceived")
        self.command.addStdout(data)

    def errReceived(self, data):
        if self.debug:
            log.msg("RunProcessPP.errReceived")
        self.command.addStderr(data)

    def processEnded(self, status_object):
        if self.debug:
            log.msg("RunProcessPP.processEnded", status_object)
        # status_object is a Failure wrapped around an
        # error.ProcessTerminated or an error.ProcessDone.
        # requires twisted >= 1.0.4 to overcome a bug in process.py
        sig = status_object.value.signal
        rc = status_object.value.exitCode

        # sometimes, even when we kill a process, GetExitCodeProcess will
        # still return a zero exit status.  So we force it.  See
        # http://stackoverflow.com/questions/2061735/42-passed-to-terminateprocess-sometimes-getexitcodeprocess-returns-0
        if self.killed and rc == 0:
            log.msg("process was killed, but exited with status 0; faking a failure")
            # windows returns '1' even for signalled failures, while POSIX
            # returns -1
            if runtime.platformType == 'win32':
                rc = 1
            else:
                rc = -1
        self.command.finished(sig, rc)

class RunProcess:
    """
    This is a helper class, used by slave commands to run programs in a child
    shell.
    """

    notreally = False
    BACKUP_TIMEOUT = 5
    KILL = "KILL"
    CHUNK_LIMIT = 128*1024

    # Don't send any data until at least BUFFER_SIZE bytes have been collected
    # or BUFFER_TIMEOUT elapsed
    BUFFER_SIZE = 64*1024
    BUFFER_TIMEOUT = 5

    # For sending elapsed time:
    startTime = None
    elapsedTime = None

    # For scheduling future events
    _reactor = reactor

    # I wish we had easy access to CLOCK_MONOTONIC in Python:
    # http://www.opengroup.org/onlinepubs/000095399/functions/clock_getres.html
    # Then changes to the system clock during a run wouldn't affect the
    # "elapsed time" results.

    def __init__(self, builder, command,
                 workdir, environ=None,
                 sendStdout=True, sendStderr=True, sendRC=True,
                 timeout=None, maxTime=None, initialStdin=None,
                 keepStdinOpen=False, keepStdout=False, keepStderr=False,
                 logEnviron=True, logfiles={}, usePTY="slave-config"):
        """

        @param keepStdout: if True, we keep a copy of all the stdout text
                           that we've seen. This copy is available in
                           self.stdout, which can be read after the command
                           has finished.
        @param keepStderr: same, for stderr

        @param usePTY: "slave-config" -> use the SlaveBuilder's usePTY;
            otherwise, true to use a PTY, false to not use a PTY.
        """

        self.builder = builder
        self.command = util.Obfuscated.get_real(command)

        # We need to take unicode commands and arguments and encode them using
        # the appropriate encoding for the slave.  This is mostly platform
        # specific, but can be overridden in the slave's buildbot.tac file.
        #
        # Encoding the command line here ensures that the called executables
        # receive arguments as bytestrings encoded with an appropriate
        # platform-specific encoding.  It also plays nicely with twisted's
        # spawnProcess which checks that arguments are regular strings or
        # unicode strings that can be encoded as ascii (which generates a
        # warning).
        if isinstance(self.command, (tuple, list)):
            for i, a in enumerate(self.command):
                if isinstance(a, unicode):
                    self.command[i] = a.encode(self.builder.unicode_encoding)
        elif isinstance(self.command, unicode):
            self.command = self.command.encode(self.builder.unicode_encoding)

        self.fake_command = util.Obfuscated.get_fake(command)
        self.sendStdout = sendStdout
        self.sendStderr = sendStderr
        self.sendRC = sendRC
        self.logfiles = logfiles
        self.workdir = workdir
        if not os.path.exists(workdir):
            os.makedirs(workdir)
        if environ:
            if environ.has_key('PYTHONPATH'):
                ppath = environ['PYTHONPATH']
                # Need to do os.pathsep translation.  We could either do that
                # by replacing all incoming ':'s with os.pathsep, or by
                # accepting lists.  I like lists better.
                if not isinstance(ppath, str):
                    # If it's not a string, treat it as a sequence to be
                    # turned into a string.
                    ppath = os.pathsep.join(ppath)

                environ['PYTHONPATH'] = ppath + os.pathsep + "${PYTHONPATH}"

            # do substitution on variable values matching pattern: ${name}
            p = re.compile('\${([0-9a-zA-Z_]*)}')
            def subst(match):
                return os.environ.get(match.group(1), "")
            newenv = {}
            for key in os.environ.keys():
                # setting a key to None will delete it from the slave
                # environment
                if key not in environ or environ[key] is not None:
                    newenv[key] = os.environ[key]
            for key in environ.keys():
                if environ[key] is not None:
                    newenv[key] = p.sub(subst, environ[key])

            self.environ = newenv
        else:  # not environ
            self.environ = os.environ.copy()
        self.initialStdin = initialStdin
        self.keepStdinOpen = keepStdinOpen
        self.logEnviron = logEnviron
        self.timeout = timeout
        self.timer = None
        self.maxTime = maxTime
        self.maxTimer = None
        self.keepStdout = keepStdout
        self.keepStderr = keepStderr

        self.buffered = deque()
        self.buflen = 0
        self.buftimer = None

        if usePTY == "slave-config":
            self.usePTY = self.builder.usePTY
        else:
            self.usePTY = usePTY

        # usePTY=True is a convenience for cleaning up all children and
        # grandchildren of a hung command.  Fall back to usePTY=False on
        # systems and in situations where ptys cause problems.  PTYs are
        # posix-only, and for .closeStdin to matter, we must use a pipe,
        # not a PTY
        if runtime.platformType != "posix" or initialStdin is not None:
            if self.usePTY and usePTY != "slave-config":
                self.sendStatus({'header': "WARNING: disabling usePTY for this command"})
            self.usePTY = False

        self.logFileWatchers = []
        for name, filevalue in self.logfiles.items():
            filename = filevalue
            follow = False

            # check for a dictionary of options
            # filename is required, others are optional
            if type(filevalue) == dict:
                filename = filevalue['filename']
                follow = filevalue.get('follow', False)

            w = LogFileWatcher(self, name,
                               os.path.join(self.workdir, filename),
                               follow=follow)
            self.logFileWatchers.append(w)
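
    # Illustrative sketch (not part of the original module): net effect of the
    # environ handling above, with hypothetical values.  A value of None
    # removes the variable from the slave environment, ${name} is expanded
    # from the slave's own os.environ, and a PYTHONPATH given as a list is
    # joined with os.pathsep before "${PYTHONPATH}" is appended.
    #
    #     environ = {'PATH': '/extra/bin:${PATH}',  # prepend to the slave's own PATH
    #                'DISPLAY': None,               # drop DISPLAY entirely
    #                'PYTHONPATH': ['/a', '/b']}    # -> "/a:/b" + os.pathsep + "${PYTHONPATH}"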

    def __repr__(self):
        return "<%s '%s'>" % (self.__class__.__name__, self.fake_command)

    def sendStatus(self, status):
        self.builder.sendUpdate(status)

    def start(self):
        # return a Deferred which fires (with the exit code) when the command
        # completes
        if self.keepStdout:
            self.stdout = ""
        if self.keepStderr:
            self.stderr = ""
        self.deferred = defer.Deferred()
        try:
            self._startCommand()
        except:
            log.msg("error in RunProcess._startCommand")
            log.err()
            self._addToBuffers('stderr', "error in RunProcess._startCommand\n")
            self._addToBuffers('stderr', traceback.format_exc())
            self._sendBuffers()
            # pretend it was a shell error
            self.deferred.errback(AbandonChain(-1))
        return self.deferred

    def _startCommand(self):
        # ensure workdir exists
        if not os.path.isdir(self.workdir):
            os.makedirs(self.workdir)
        log.msg("RunProcess._startCommand")
        if self.notreally:
            self._addToBuffers('header', "command '%s' in dir %s" % \
                               (self.fake_command, self.workdir))
            self._addToBuffers('header', "(not really)\n")
            self.finished(None, 0)
            return

        self.pp = RunProcessPP(self)

        if type(self.command) in types.StringTypes:
            if runtime.platformType == 'win32':
                argv = os.environ['COMSPEC'].split()  # allow %COMSPEC% to have args
                if '/c' not in argv: argv += ['/c']
                argv += [self.command]
            else:
                # for posix, use /bin/sh.  for other non-posix, well, doesn't
                # hurt to try
                argv = ['/bin/sh', '-c', self.command]
            display = self.fake_command
        else:
            # On windows, CreateProcess requires an absolute path to the
            # executable.  When we call spawnProcess below, we pass argv[0]
            # as the executable.  So, for .exe's that we have absolute paths
            # to, we can call directly.  Otherwise, we should run under
            # COMSPEC (usually cmd.exe) to handle path searching, etc.
            if runtime.platformType == 'win32' and not \
                    (self.command[0].lower().endswith(".exe") and os.path.isabs(self.command[0])):
                argv = os.environ['COMSPEC'].split()  # allow %COMSPEC% to have args
                if '/c' not in argv: argv += ['/c']
                argv += list(self.command)
            else:
                argv = self.command
            # Attempt to format this for use by a shell, although the process
            # isn't perfect
            display = shell_quote(self.fake_command)

        # $PWD usually indicates the current directory; spawnProcess may not
        # update this value, though, so we set it explicitly here.  This
        # causes weird problems (bug #456) on msys, though..
        if not self.environ.get('MACHTYPE', None) == 'i686-pc-msys':
            self.environ['PWD'] = os.path.abspath(self.workdir)

        # self.stdin is handled in RunProcessPP.connectionMade

        log.msg(" " + display)
        self._addToBuffers('header', display+"\n")

        # then comes the secondary information
        msg = " in dir %s" % (self.workdir,)
        if self.timeout:
            msg += " (timeout %d secs)" % (self.timeout,)
        log.msg(" " + msg)
        self._addToBuffers('header', msg+"\n")

        msg = " watching logfiles %s" % (self.logfiles,)
        log.msg(" " + msg)
        self._addToBuffers('header', msg+"\n")

        # then the obfuscated command array, to help resolve any ambiguity
        msg = " argv: %s" % (self.fake_command,)
        log.msg(" " + msg)
        self._addToBuffers('header', msg+"\n")

        # then the environment, since it sometimes causes problems
        if self.logEnviron:
            msg = " environment:\n"
            env_names = self.environ.keys()
            env_names.sort()
            for name in env_names:
                msg += " %s=%s\n" % (name, self.environ[name])
            log.msg(" environment: %s" % (self.environ,))
            self._addToBuffers('header', msg)

        if self.initialStdin:
            msg = " writing %d bytes to stdin" % len(self.initialStdin)
            log.msg(" " + msg)
            self._addToBuffers('header', msg+"\n")

        if self.keepStdinOpen:
            msg = " leaving stdin open"
        else:
            msg = " closing stdin"
        log.msg(" " + msg)
        self._addToBuffers('header', msg+"\n")

        msg = " using PTY: %s" % bool(self.usePTY)
        log.msg(" " + msg)
        self._addToBuffers('header', msg+"\n")

        # this will be buffered until connectionMade is called
        if self.initialStdin:
            self.pp.writeStdin(self.initialStdin)
        if not self.keepStdinOpen:
            self.pp.closeStdin()

        # win32eventreactor's spawnProcess (under twisted <= 2.0.1) returns
        # None, as opposed to all the posixbase-derived reactors (which
        # return the new Process object).  This is a nuisance.  We can make
        # up for it by having the ProcessProtocol give us their .transport
        # attribute after they get one.  I'd prefer to get it from
        # spawnProcess because I'm concerned about returning from this method
        # without having a valid self.process to work with.  (if kill() were
        # called right after we return, but somehow before connectionMade
        # were called, then kill() would blow up).
        self.process = None
        self.startTime = util.now(self._reactor)

        p = reactor.spawnProcess(self.pp, argv[0], argv,
                                 self.environ,
                                 self.workdir,
                                 usePTY=self.usePTY)
        # connectionMade might have been called during spawnProcess
        if not self.process:
            self.process = p

        # connectionMade also closes stdin as long as we're not using a PTY.
        # This is intended to kill off inappropriately interactive commands
        # better than the (long) hung-command timeout.  ProcessPTY should be
        # enhanced to allow the same childFDs argument that Process takes,
        # which would let us connect stdin to /dev/null .

        if self.timeout:
            self.timer = self._reactor.callLater(self.timeout, self.doTimeout)

        if self.maxTime:
            self.maxTimer = self._reactor.callLater(self.maxTime, self.doMaxTimeout)

        for w in self.logFileWatchers:
            w.start()
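
    # Illustrative sketch (not part of the original module): summary of how
    # _startCommand builds argv (commands and paths here are hypothetical):
    #
    #   command is a string, POSIX:  argv = ['/bin/sh', '-c', 'make test']
    #   command is a string, win32:  argv = ['cmd.exe', '/c', 'make test']    (from %COMSPEC%)
    #   command is a list,   POSIX:  argv = ['make', 'test']                  (used as-is)
    #   command is a list,   win32:  argv = ['cmd.exe', '/c', 'make', 'test'] unless argv[0] is an
    #                                absolute path to a .exe, in which case it is used as-is
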
    def _chunkForSend(self, data):
        """
        limit the chunks that we send over PB to 128k, since it has a
        hardwired string-size limit of 640k.
        """
        LIMIT = self.CHUNK_LIMIT
        for i in range(0, len(data), LIMIT):
            yield data[i:i+LIMIT]
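
    # Illustrative sketch (not part of the original module): with
    # CHUNK_LIMIT = 128*1024, a 300 KiB stdout string is yielded as three
    # pieces of 128 KiB + 128 KiB + 44 KiB, so no single PB string exceeds
    # the limit.  For example:
    #
    #     chunks = list(self._chunkForSend('x' * 300 * 1024))
    #     assert [len(c) for c in chunks] == [131072, 131072, 45056]
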
    def _collapseMsg(self, msg):
        """
        Take msg, which is a dictionary of lists of output chunks, and
        concatenate all the chunks into a single string
        """
        retval = {}
        for log in msg:
            data = "".join(msg[log])
            if isinstance(log, tuple) and log[0] == 'log':
                retval['log'] = (log[1], data)
            else:
                retval[log] = data
        return retval

    def _sendMessage(self, msg):
        """
        Collapse and send msg to the master
        """
        if not msg:
            return
        msg = self._collapseMsg(msg)
        self.sendStatus(msg)

    def _bufferTimeout(self):
        self.buftimer = None
        self._sendBuffers()

    def _sendBuffers(self):
        """
        Send all the content in our buffers.
        """
        msg = {}
        msg_size = 0
        lastlog = None
        logdata = []
        while self.buffered:
            # Grab the next bits from the buffer
            logname, data = self.buffered.popleft()

            # If this log is different than the last one, then we have to send
            # out the message so far.  This is because the message is
            # transferred as a dictionary, which makes the ordering of keys
            # unspecified, and makes it impossible to interleave data from
            # different logs.  A future enhancement could be to change the
            # master to support a list of (logname, data) tuples instead of a
            # dictionary.
            # On our first pass through this loop lastlog is None
            if lastlog is None:
                lastlog = logname
            elif logname != lastlog:
                self._sendMessage(msg)
                msg = {}
                msg_size = 0
            lastlog = logname

            logdata = msg.setdefault(logname, [])

            # Chunkify the log data to make sure we're not sending more than
            # CHUNK_LIMIT at a time
            for chunk in self._chunkForSend(data):
                if len(chunk) == 0: continue
                logdata.append(chunk)
                msg_size += len(chunk)
                if msg_size >= self.CHUNK_LIMIT:
                    # We've gone beyond the chunk limit, so send out our
                    # message.  At worst this results in a message slightly
                    # larger than (2*CHUNK_LIMIT)-1
                    self._sendMessage(msg)
                    msg = {}
                    logdata = msg.setdefault(logname, [])
                    msg_size = 0
        self.buflen = 0
        if logdata:
            self._sendMessage(msg)
        if self.buftimer:
            if self.buftimer.active():
                self.buftimer.cancel()
            self.buftimer = None

    def _addToBuffers(self, logname, data):
        """
        Add data to the buffer for logname
        Start a timer to send the buffers if BUFFER_TIMEOUT elapses.
        If adding data causes the buffer size to grow beyond BUFFER_SIZE, then
        the buffers will be sent.
        """
        n = len(data)

        self.buflen += n
        self.buffered.append((logname, data))
        if self.buflen > self.BUFFER_SIZE:
            self._sendBuffers()
        elif not self.buftimer:
            self.buftimer = self._reactor.callLater(self.BUFFER_TIMEOUT, self._bufferTimeout)

    def addStdout(self, data):
        if self.sendStdout:
            self._addToBuffers('stdout', data)

        if self.keepStdout:
            self.stdout += data
        if self.timer:
            self.timer.reset(self.timeout)

    def addStderr(self, data):
        if self.sendStderr:
            self._addToBuffers('stderr', data)

        if self.keepStderr:
            self.stderr += data
        if self.timer:
            self.timer.reset(self.timeout)

    def addLogfile(self, name, data):
        self._addToBuffers( ('log', name), data)

        if self.timer:
            self.timer.reset(self.timeout)

    def finished(self, sig, rc):
        self.elapsedTime = util.now(self._reactor) - self.startTime
        log.msg("command finished with signal %s, exit code %s, elapsedTime: %0.6f" % (sig, rc, self.elapsedTime))
        for w in self.logFileWatchers:
            # this will send the final updates
            w.stop()
        self._sendBuffers()
        if sig is not None:
            rc = -1
        if self.sendRC:
            if sig is not None:
                self.sendStatus(
                    {'header': "process killed by signal %d\n" % sig})
            self.sendStatus({'rc': rc})
        self.sendStatus({'header': "elapsedTime=%0.6f\n" % self.elapsedTime})
        if self.timer:
            self.timer.cancel()
            self.timer = None
        if self.maxTimer:
            self.maxTimer.cancel()
            self.maxTimer = None
        if self.buftimer:
            self.buftimer.cancel()
            self.buftimer = None
        d = self.deferred
        self.deferred = None
        if d:
            d.callback(rc)
        else:
            log.msg("Hey, command %s finished twice" % self)

    def failed(self, why):
        self._sendBuffers()
        log.msg("RunProcess.failed: command failed: %s" % (why,))
        if self.timer:
            self.timer.cancel()
            self.timer = None
        if self.maxTimer:
            self.maxTimer.cancel()
            self.maxTimer = None
        if self.buftimer:
            self.buftimer.cancel()
            self.buftimer = None
        d = self.deferred
        self.deferred = None
        if d:
            d.errback(why)
        else:
            log.msg("Hey, command %s finished twice" % self)

    def doTimeout(self):
        self.timer = None
        msg = "command timed out: %d seconds without output" % self.timeout
        self.kill(msg)

    def doMaxTimeout(self):
        self.maxTimer = None
        msg = "command timed out: %d seconds elapsed" % self.maxTime
        self.kill(msg)

    def kill(self, msg):
        # This may be called by the timeout, or when the user has decided to
        # abort this build.
        self._sendBuffers()
        if self.timer:
            self.timer.cancel()
            self.timer = None
        if self.maxTimer:
            self.maxTimer.cancel()
            self.maxTimer = None
        if self.buftimer:
            self.buftimer.cancel()
            self.buftimer = None
        if hasattr(self.process, "pid") and self.process.pid is not None:
            msg += ", killing pid %s" % self.process.pid
        log.msg(msg)
        self.sendStatus({'header': "\n" + msg + "\n"})

        # let the PP know that we are killing it, so that it can ensure that
        # the exit status comes out right
        self.pp.killed = True

        hit = 0
        if runtime.platformType == "posix":
            try:
                # really want to kill off all child processes too.  Process
                # Groups are ideal for this, but that requires
                # spawnProcess(usePTY=1).  Try both ways in case process was
                # not started that way.

                # the test suite sets self.KILL=None to tell us we should
                # only pretend to kill the child.  This lets us test the
                # backup timer.

                sig = None
                if self.KILL is not None:
                    sig = getattr(signal, "SIG" + self.KILL, None)

                if self.KILL == None:
                    log.msg("self.KILL==None, only pretending to kill child")
                elif sig is None:
                    log.msg("signal module is missing SIG%s" % self.KILL)
                elif not hasattr(os, "kill"):
                    log.msg("os module is missing the 'kill' function")
                elif not hasattr(self.process, "pid") or self.process.pid is None:
                    log.msg("self.process has no pid")
                else:
                    log.msg("trying os.kill(-pid, %d)" % (sig,))
                    # TODO: maybe use os.killpg instead of a negative pid?
                    os.kill(-self.process.pid, sig)
                    log.msg(" signal %s sent successfully" % sig)
                    hit = 1
            except OSError:
                # probably no-such-process, maybe because there is no process
                # group
                pass
        if not hit:
            try:
                if self.KILL is None:
                    log.msg("self.KILL==None, only pretending to kill child")
                else:
                    log.msg("trying process.signalProcess('KILL')")
                    self.process.signalProcess(self.KILL)
                    log.msg(" signal %s sent successfully" % (self.KILL,))
                    hit = 1
            except OSError:
                # could be no-such-process, because they finished very recently
                pass
            except error.ProcessExitedAlready:
                # Twisted thinks the process has already exited
                pass
        if not hit:
            log.msg("signalProcess/os.kill failed both times")

        if runtime.platformType == "posix":
            # we only do this under posix because the win32eventreactor
            # blocks here until the process has terminated, while closing
            # stderr.  This is weird.
            self.pp.transport.loseConnection()

        # finished ought to be called momentarily.  Just in case it doesn't,
        # set a timer which will abandon the command.
        self.timer = self._reactor.callLater(self.BACKUP_TIMEOUT,
                                             self.doBackupTimeout)
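
    # Illustrative sketch (not part of the original module): on POSIX,
    # os.kill() with a negative pid signals the whole process group, which is
    # why kill() above tries "os.kill(-pid, sig)" first.  It is equivalent to
    # os.killpg(pid, sig), as the TODO comment notes:
    #
    #     os.kill(-self.process.pid, signal.SIGKILL)
    #     # same effect as: os.killpg(self.process.pid, signal.SIGKILL)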

    def doBackupTimeout(self):
        log.msg("we tried to kill the process, and it wouldn't die.."
                " finish anyway")
        self.timer = None
        self.sendStatus({'header': "SIGKILL failed to kill process\n"})
        if self.sendRC:
            self.sendStatus({'header': "using fake rc=-1\n"})
            self.sendStatus({'rc': -1})
        self.failed(RuntimeError("SIGKILL failed to kill process"))

    def writeStdin(self, data):
        self.pp.writeStdin(data)

    def closeStdin(self):
        self.pp.closeStdin()
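

# Illustrative sketch (not part of the original module): how a slave-side
# command might drive RunProcess.  The _StubBuilder below is hypothetical; in
# Buildbot proper the builder is a SlaveBuilder, which supplies sendUpdate(),
# usePTY and unicode_encoding.  start() returns a Deferred that fires with
# the exit code once the child process completes.
def _example_run_process():
    class _StubBuilder:
        usePTY = False
        unicode_encoding = 'utf-8'
        def sendUpdate(self, status):
            log.msg("status update: %r" % (status,))

    rp = RunProcess(_StubBuilder(), ["echo", "hello"], workdir="/tmp",
                    sendStdout=True, timeout=60)
    d = rp.start()
    d.addCallback(lambda rc: log.msg("command finished with rc=%s" % rc))
    return d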