Package buildslave :: Module runprocess
[frames] | [no frames]

Source Code for Module buildslave.runprocess

  1  """ 
  2  Support for running 'shell commands' 
  3  """ 
  4   
  5  import os 
  6  import sys 
  7  import signal 
  8  import types 
  9  import re 
 10  import traceback 
 11  import stat 
 12  from collections import deque 
 13   
 14  from twisted.python import runtime, log 
 15  from twisted.internet import reactor, defer, protocol, task 
 16   
 17  from buildslave import util 
 18  from buildslave.exceptions import AbandonChain 
 19   
class LogFileWatcher:
    """Poll a logfile written by a running command and stream new data back.

    An instance is created before the ShellCommand starts; it waits for the
    file to appear (or change from its initial state) and then forwards its
    contents to the command via C{addLogfile}.
    """

    # seconds between successive checks of the file
    POLL_INTERVAL = 2

    def __init__(self, command, name, logfile, follow=False):
        self.command = command
        self.name = name
        self.logfile = logfile

        log.msg("LogFileWatcher created to watch %s" % logfile)
        # Snapshot the file's stat info now (if it already exists) so we can
        # tell later when the process starts changing it.
        self.old_logfile_stats = self.statFile()
        self.started = False

        # When following, only data appended after watching began is sent.
        self.follow = follow

        self.poller = task.LoopingCall(self.poll)

    def start(self):
        """Begin periodic polling of the logfile."""
        self.poller.start(self.POLL_INTERVAL).addErrback(self._cleanupPoll)

    def _cleanupPoll(self, err):
        # A poll raised; log it and drop the poller so stop() won't touch it.
        log.err(err, msg="Polling error")
        self.poller = None

    def stop(self):
        """Do one final poll, then shut down the poller and close the file."""
        self.poll()
        if self.poller is not None:
            self.poller.stop()
        if self.started:
            self.f.close()

    def statFile(self):
        """Return (ctime, mtime, size) for the logfile, or None if absent."""
        if not os.path.exists(self.logfile):
            return None
        st = os.stat(self.logfile)
        return (st[stat.ST_CTIME], st[stat.ST_MTIME], st[stat.ST_SIZE])

    def poll(self):
        if not self.started:
            current = self.statFile()
            if current == self.old_logfile_stats:
                return  # file unchanged; the command hasn't touched it yet
            if not current:
                # The file existed before but is gone now: the process must
                # have deleted it in preparation for creating a fresh one.
                # Forget the initial snapshot so the new file is noticed.
                self.old_logfile_stats = None
                return  # no file to work with
            self.f = open(self.logfile, "rb")
            if self.follow:
                # skip everything that was already present when we started
                self.f.seek(current[2], 0)
            self.started = True
        self.f.seek(self.f.tell(), 0)
        while True:
            chunk = self.f.read(10000)
            if not chunk:
                return
            self.command.addLogfile(self.name, chunk)
class RunProcessPP(protocol.ProcessProtocol):
    """ProcessProtocol glue between a spawned child process and a RunProcess.

    Buffers stdin data written before the connection exists, forwards
    stdout/stderr to the owning command, and reports the exit status.
    """

    debug = False

    def __init__(self, command):
        self.command = command
        self.pending_stdin = ""      # stdin data queued until connectionMade
        self.stdin_finished = False  # closeStdin has been requested
        self.killed = False          # set by RunProcess.kill before signalling

    def writeStdin(self, data):
        assert not self.stdin_finished
        if not self.connected:
            # no transport yet; hold the data until connectionMade fires
            self.pending_stdin += data
        else:
            self.transport.write(data)

    def closeStdin(self):
        if self.connected:
            if self.debug: log.msg(" closing stdin")
            self.transport.closeStdin()
        self.stdin_finished = True

    def connectionMade(self):
        if self.debug:
            log.msg("RunProcessPP.connectionMade")
        if not self.command.process:
            if self.debug:
                log.msg(" assigning self.command.process: %s" %
                        (self.transport,))
            self.command.process = self.transport

        # TODO: maybe we shouldn't close stdin when using a PTY. I can't test
        # this yet, recent debian glibc has a bug which causes thread-using
        # test cases to SIGHUP trial, and the workaround is to either run
        # the whole test with /bin/sh -c " ".join(argv) (way gross) or to
        # not use a PTY. Once the bug is fixed, I'll be able to test what
        # happens when you close stdin on a pty. My concern is that it will
        # SIGHUP the child (since we are, in a sense, hanging up on them).
        # But it may well be that keeping stdout open prevents the SIGHUP
        # from being sent.
        #if not self.command.usePTY:

        # flush anything queued before the transport existed
        if self.pending_stdin:
            if self.debug: log.msg(" writing to stdin")
            self.transport.write(self.pending_stdin)
        if self.stdin_finished:
            if self.debug: log.msg(" closing stdin")
            self.transport.closeStdin()

    def outReceived(self, data):
        if self.debug:
            log.msg("RunProcessPP.outReceived")
        self.command.addStdout(data)

    def errReceived(self, data):
        if self.debug:
            log.msg("RunProcessPP.errReceived")
        self.command.addStderr(data)

    def processEnded(self, status_object):
        if self.debug:
            log.msg("RunProcessPP.processEnded", status_object)
        # status_object is a Failure wrapping an error.ProcessTerminated or
        # an error.ProcessDone (requires twisted >= 1.0.4 to overcome a bug
        # in process.py).
        sig = status_object.value.signal
        rc = status_object.value.exitCode

        # Sometimes, even when we kill a process, GetExitCodeProcess will
        # still return a zero exit status; force a failure code instead. See
        # http://stackoverflow.com/questions/2061735/42-passed-to-terminateprocess-sometimes-getexitcodeprocess-returns-0
        if self.killed and rc == 0:
            log.msg("process was killed, but exited with status 0; faking a failure")
            # windows reports '1' even for signalled failures, POSIX uses -1
            rc = 1 if runtime.platformType == 'win32' else -1
        self.command.finished(sig, rc)
class RunProcess:
    """
    This is a helper class, used by slave commands to run programs in a child
    shell.
    """

    # if True, don't actually spawn anything -- just log what would run
    notreally = False
    # seconds to wait, after a kill, for the process to actually exit
    BACKUP_TIMEOUT = 5
    # name of the signal used to kill the child ("KILL" -> SIGKILL);
    # the test suite sets this to None to only pretend to kill
    KILL = "KILL"
    # limit the chunks that we send over PB to 128k, since it has a
    # hardwired string-size limit of 640k
    CHUNK_LIMIT = 128*1024

    # Don't send any data until at least BUFFER_SIZE bytes have been collected
    # or BUFFER_TIMEOUT elapsed
    BUFFER_SIZE = 64*1024
    BUFFER_TIMEOUT = 5

    # For sending elapsed time:
    startTime = None
    elapsedTime = None

    # For scheduling future events
    _reactor = reactor

    # I wish we had easy access to CLOCK_MONOTONIC in Python:
    # http://www.opengroup.org/onlinepubs/000095399/functions/clock_getres.html
    # Then changes to the system clock during a run wouldn't effect the "elapsed
    # time" results.

    def __init__(self, builder, command,
                 workdir, environ=None,
                 sendStdout=True, sendStderr=True, sendRC=True,
                 timeout=None, maxTime=None, initialStdin=None,
                 keepStdinOpen=False, keepStdout=False, keepStderr=False,
                 logEnviron=True, logfiles={}, usePTY="slave-config"):
        """
        @param builder: the SlaveBuilder on whose behalf the command runs;
                        used for status updates and configuration
                        (unicode_encoding, usePTY).
        @param command: the command to run, as a string or argv list;
                        may be wrapped by util.Obfuscated.

        @param keepStdout: if True, we keep a copy of all the stdout text
                           that we've seen. This copy is available in
                           self.stdout, which can be read after the command
                           has finished.
        @param keepStderr: same, for stderr

        @param usePTY: "slave-config" -> use the SlaveBuilder's usePTY;
            otherwise, true to use a PTY, false to not use a PTY.
        """
        # NOTE(review): logfiles={} is a shared mutable default; it appears
        # to be only read here, never mutated -- confirm before relying on it.

        self.builder = builder
        self.command = util.Obfuscated.get_real(command)

        # We need to take unicode commands and arguments and encode them using
        # the appropriate encoding for the slave.  This is mostly platform
        # specific, but can be overridden in the slave's buildbot.tac file.
        #
        # Encoding the command line here ensures that the called executables
        # receive arguments as bytestrings encoded with an appropriate
        # platform-specific encoding.  It also plays nicely with twisted's
        # spawnProcess which checks that arguments are regular strings or
        # unicode strings that can be encoded as ascii (which generates a
        # warning).
        if isinstance(self.command, (tuple, list)):
            for i, a in enumerate(self.command):
                if isinstance(a, unicode):
                    self.command[i] = a.encode(self.builder.unicode_encoding)
        elif isinstance(self.command, unicode):
            self.command = self.command.encode(self.builder.unicode_encoding)

        # the obfuscated form is what we display and log
        self.fake_command = util.Obfuscated.get_fake(command)
        self.sendStdout = sendStdout
        self.sendStderr = sendStderr
        self.sendRC = sendRC
        self.logfiles = logfiles
        self.workdir = workdir
        if not os.path.exists(workdir):
            os.makedirs(workdir)
        if environ:
            if environ.has_key('PYTHONPATH'):
                ppath = environ['PYTHONPATH']
                # Need to do os.pathsep translation.  We could either do that
                # by replacing all incoming ':'s with os.pathsep, or by
                # accepting lists.  I like lists better.
                if not isinstance(ppath, str):
                    # If it's not a string, treat it as a sequence to be
                    # turned in to a string.
                    ppath = os.pathsep.join(ppath)

                # keep the slave's own PYTHONPATH reachable via substitution
                environ['PYTHONPATH'] = ppath + os.pathsep + "${PYTHONPATH}"

            # do substitution on variable values matching patern: ${name}
            p = re.compile('\${([0-9a-zA-Z_]*)}')
            def subst(match):
                # unknown names substitute to the empty string
                return os.environ.get(match.group(1), "")
            newenv = {}
            for key in os.environ.keys():
                # setting a key to None will delete it from the slave environment
                if key not in environ or environ[key] is not None:
                    newenv[key] = os.environ[key]
            for key in environ.keys():
                if environ[key] is not None:
                    newenv[key] = p.sub(subst, environ[key])

            self.environ = newenv
        else: # not environ
            self.environ = os.environ.copy()
        self.initialStdin = initialStdin
        self.keepStdinOpen = keepStdinOpen
        self.logEnviron = logEnviron
        self.timeout = timeout
        self.timer = None
        self.maxTime = maxTime
        self.maxTimer = None
        self.keepStdout = keepStdout
        self.keepStderr = keepStderr

        # (logname, data) pairs waiting to be sent to the master
        self.buffered = deque()
        self.buflen = 0
        self.buftimer = None

        if usePTY == "slave-config":
            self.usePTY = self.builder.usePTY
        else:
            self.usePTY = usePTY

        # usePTY=True is a convenience for cleaning up all children and
        # grandchildren of a hung command. Fall back to usePTY=False on systems
        # and in situations where ptys cause problems.  PTYs are posix-only,
        # and for .closeStdin to matter, we must use a pipe, not a PTY
        if runtime.platformType != "posix" or initialStdin is not None:
            if self.usePTY and usePTY != "slave-config":
                self.sendStatus({'header': "WARNING: disabling usePTY for this command"})
            self.usePTY = False

        self.logFileWatchers = []
        for name,filevalue in self.logfiles.items():
            filename = filevalue
            follow = False

            # check for a dictionary of options
            # filename is required, others are optional
            if type(filevalue) == dict:
                filename = filevalue['filename']
                follow = filevalue.get('follow', False)

            w = LogFileWatcher(self, name,
                               os.path.join(self.workdir, filename),
                               follow=follow)
            self.logFileWatchers.append(w)

    def __repr__(self):
        # show the obfuscated command, never the real one
        return "<%s '%s'>" % (self.__class__.__name__, self.fake_command)

    def sendStatus(self, status):
        """Forward a status dictionary to the master via the builder."""
        self.builder.sendUpdate(status)

    def start(self):
        """Start the command.

        Returns a Deferred which fires (with the exit code) when the command
        completes; it errbacks with AbandonChain(-1) if startup fails.
        """
        if self.keepStdout:
            self.stdout = ""
        if self.keepStderr:
            self.stderr = ""
        self.deferred = defer.Deferred()
        try:
            self._startCommand()
        except:
            log.msg("error in RunProcess._startCommand")
            log.err()
            self._addToBuffers('stderr', "error in RunProcess._startCommand\n")
            self._addToBuffers('stderr', traceback.format_exc())
            self._sendBuffers()
            # pretend it was a shell error
            self.deferred.errback(AbandonChain(-1))
        return self.deferred

    def _startCommand(self):
        """Build argv, log the command headers, and spawn the child process."""
        # ensure workdir exists
        if not os.path.isdir(self.workdir):
            os.makedirs(self.workdir)
        log.msg("RunProcess._startCommand")
        if self.notreally:
            # dry-run mode: report what would have run, then finish cleanly
            self._addToBuffers('header', "command '%s' in dir %s" % \
                               (self.fake_command, self.workdir))
            self._addToBuffers('header', "(not really)\n")
            self.finished(None, 0)
            return

        self.pp = RunProcessPP(self)

        if type(self.command) in types.StringTypes:
            if runtime.platformType  == 'win32':
                argv = os.environ['COMSPEC'].split() # allow %COMSPEC% to have args
                if '/c' not in argv: argv += ['/c']
                argv += [self.command]
            else:
                # for posix, use /bin/sh. for other non-posix, well, doesn't
                # hurt to try
                argv = ['/bin/sh', '-c', self.command]
            display = self.fake_command
        else:
            # On windows, CreateProcess requires an absolute path to the executable.
            # When we call spawnProcess below, we pass argv[0] as the executable.
            # So, for .exe's that we have absolute paths to, we can call directly
            # Otherwise, we should run under COMSPEC (usually cmd.exe) to
            # handle path searching, etc.
            if runtime.platformType == 'win32' and not \
                    (self.command[0].lower().endswith(".exe") and os.path.isabs(self.command[0])):
                argv = os.environ['COMSPEC'].split() # allow %COMSPEC% to have args
                if '/c' not in argv: argv += ['/c']
                argv += list(self.command)
            else:
                argv = self.command
            display = " ".join(self.fake_command)

        # $PWD usually indicates the current directory; spawnProcess may not
        # update this value, though, so we set it explicitly here.  This causes
        # weird problems (bug #456) on msys, though..
        if not self.environ.get('MACHTYPE', None) == 'i686-pc-msys':
            self.environ['PWD'] = os.path.abspath(self.workdir)

        # self.stdin is handled in RunProcessPP.connectionMade

        # first header line is the command in plain text, argv joined with
        # spaces.  You should be able to cut-and-paste this into a shell to
        # obtain the same results.  If there are spaces in the arguments, too
        # bad.
        log.msg(" " + display)
        self._addToBuffers('header', display+"\n")

        # then comes the secondary information
        msg = " in dir %s" % (self.workdir,)
        if self.timeout:
            msg += " (timeout %d secs)" % (self.timeout,)
        log.msg(" " + msg)
        self._addToBuffers('header', msg+"\n")

        msg = " watching logfiles %s" % (self.logfiles,)
        log.msg(" " + msg)
        self._addToBuffers('header', msg+"\n")

        # then the obfuscated command array for resolving unambiguity
        msg = " argv: %s" % (self.fake_command,)
        log.msg(" " + msg)
        self._addToBuffers('header', msg+"\n")

        # then the environment, since it sometimes causes problems
        if self.logEnviron:
            msg = " environment:\n"
            env_names = self.environ.keys()
            env_names.sort()
            for name in env_names:
                msg += "  %s=%s\n" % (name, self.environ[name])
            log.msg(" environment: %s" % (self.environ,))
            self._addToBuffers('header', msg)

        if self.initialStdin:
            msg = " writing %d bytes to stdin" % len(self.initialStdin)
            log.msg(" " + msg)
            self._addToBuffers('header', msg+"\n")

        if self.keepStdinOpen:
            msg = " leaving stdin open"
        else:
            msg = " closing stdin"
        log.msg(" " + msg)
        self._addToBuffers('header', msg+"\n")

        msg = " using PTY: %s" % bool(self.usePTY)
        log.msg(" " + msg)
        self._addToBuffers('header', msg+"\n")

        # this will be buffered until connectionMade is called
        if self.initialStdin:
            self.pp.writeStdin(self.initialStdin)
        if not self.keepStdinOpen:
            self.pp.closeStdin()

        # win32eventreactor's spawnProcess (under twisted <= 2.0.1) returns
        # None, as opposed to all the posixbase-derived reactors (which
        # return the new Process object). This is a nuisance. We can make up
        # for it by having the ProcessProtocol give us their .transport
        # attribute after they get one. I'd prefer to get it from
        # spawnProcess because I'm concerned about returning from this method
        # without having a valid self.process to work with. (if kill() were
        # called right after we return, but somehow before connectionMade
        # were called, then kill() would blow up).
        self.process = None
        self.startTime = util.now(self._reactor)

        p = reactor.spawnProcess(self.pp, argv[0], argv,
                                 self.environ,
                                 self.workdir,
                                 usePTY=self.usePTY)
        # connectionMade might have been called during spawnProcess
        if not self.process:
            self.process = p

        # connectionMade also closes stdin as long as we're not using a PTY.
        # This is intended to kill off inappropriately interactive commands
        # better than the (long) hung-command timeout. ProcessPTY should be
        # enhanced to allow the same childFDs argument that Process takes,
        # which would let us connect stdin to /dev/null .

        if self.timeout:
            self.timer = self._reactor.callLater(self.timeout, self.doTimeout)

        if self.maxTime:
            self.maxTimer = self._reactor.callLater(self.maxTime, self.doMaxTimeout)

        for w in self.logFileWatchers:
            w.start()

    def _chunkForSend(self, data):
        """
        limit the chunks that we send over PB to 128k, since it has a hardwired
        string-size limit of 640k.
        """
        LIMIT = self.CHUNK_LIMIT
        for i in range(0, len(data), LIMIT):
            yield data[i:i+LIMIT]

    def _collapseMsg(self, msg):
        """
        Take msg, which is a dictionary of lists of output chunks, and
        concatentate all the chunks into a single string
        """
        retval = {}
        for log in msg:
            data = "".join(msg[log])
            # logfile keys are ('log', name) tuples; they collapse into a
            # single 'log' entry carrying (name, data)
            if isinstance(log, tuple) and log[0] == 'log':
                retval['log'] = (log[1], data)
            else:
                retval[log] = data
        return retval

    def _sendMessage(self, msg):
        """
        Collapse and send msg to the master
        """
        if not msg:
            return
        msg = self._collapseMsg(msg)
        self.sendStatus(msg)

    def _bufferTimeout(self):
        # BUFFER_TIMEOUT elapsed with pending data; flush it now
        self.buftimer = None
        self._sendBuffers()

    def _sendBuffers(self):
        """
        Send all the content in our buffers.
        """
        msg = {}
        msg_size = 0
        lastlog = None
        logdata = []
        while self.buffered:
            # Grab the next bits from the buffer
            logname, data = self.buffered.popleft()

            # If this log is different than the last one, then we have to send
            # out the message so far.  This is because the message is
            # transferred as a dictionary, which makes the ordering of keys
            # unspecified, and makes it impossible to interleave data from
            # different logs.  A future enhancement could be to change the
            # master to support a list of (logname, data) tuples instead of a
            # dictionary.
            # On our first pass through this loop lastlog is None
            if lastlog is None:
                lastlog = logname
            elif logname != lastlog:
                self._sendMessage(msg)
                msg = {}
                msg_size = 0
            lastlog = logname

            logdata = msg.setdefault(logname, [])

            # Chunkify the log data to make sure we're not sending more than
            # CHUNK_LIMIT at a time
            for chunk in self._chunkForSend(data):
                if len(chunk) == 0: continue
                logdata.append(chunk)
                msg_size += len(chunk)
                if msg_size >= self.CHUNK_LIMIT:
                    # We've gone beyond the chunk limit, so send out our
                    # message.  At worst this results in a message slightly
                    # larger than (2*CHUNK_LIMIT)-1
                    self._sendMessage(msg)
                    msg = {}
                    logdata = msg.setdefault(logname, [])
                    msg_size = 0
        self.buflen = 0
        if logdata:
            self._sendMessage(msg)
        if self.buftimer:
            if self.buftimer.active():
                self.buftimer.cancel()
            self.buftimer = None

    def _addToBuffers(self, logname, data):
        """
        Add data to the buffer for logname
        Start a timer to send the buffers if BUFFER_TIMEOUT elapses.
        If adding data causes the buffer size to grow beyond BUFFER_SIZE, then
        the buffers will be sent.
        """
        n = len(data)

        self.buflen += n
        self.buffered.append((logname, data))
        if self.buflen > self.BUFFER_SIZE:
            self._sendBuffers()
        elif not self.buftimer:
            self.buftimer = self._reactor.callLater(self.BUFFER_TIMEOUT, self._bufferTimeout)

    def addStdout(self, data):
        """Buffer stdout data from the child; any output resets the timeout."""
        if self.sendStdout:
            self._addToBuffers('stdout', data)

        if self.keepStdout:
            self.stdout += data
        if self.timer:
            self.timer.reset(self.timeout)

    def addStderr(self, data):
        """Buffer stderr data from the child; any output resets the timeout."""
        if self.sendStderr:
            self._addToBuffers('stderr', data)

        if self.keepStderr:
            self.stderr += data
        if self.timer:
            self.timer.reset(self.timeout)

    def addLogfile(self, name, data):
        """Buffer data from a watched logfile; activity resets the timeout."""
        self._addToBuffers( ('log', name), data)

        if self.timer:
            self.timer.reset(self.timeout)

    def finished(self, sig, rc):
        """Called by the ProcessProtocol when the child exits.

        @param sig: the signal that killed the process, or None
        @param rc: the exit code (forced to -1 if a signal was received)
        """
        self.elapsedTime = util.now(self._reactor) - self.startTime
        log.msg("command finished with signal %s, exit code %s, elapsedTime: %0.6f" % (sig,rc,self.elapsedTime))
        for w in self.logFileWatchers:
            # this will send the final updates
            w.stop()
        self._sendBuffers()
        if sig is not None:
            rc = -1
        if self.sendRC:
            if sig is not None:
                self.sendStatus(
                    {'header': "process killed by signal %d\n" % sig})
            self.sendStatus({'rc': rc})
        self.sendStatus({'header': "elapsedTime=%0.6f\n" % self.elapsedTime})
        if self.timer:
            self.timer.cancel()
            self.timer = None
        if self.maxTimer:
            self.maxTimer.cancel()
            self.maxTimer = None
        if self.buftimer:
            self.buftimer.cancel()
            self.buftimer = None
        # fire the Deferred exactly once; a second call means a bug upstream
        d = self.deferred
        self.deferred = None
        if d:
            d.callback(rc)
        else:
            log.msg("Hey, command %s finished twice" % self)

    def failed(self, why):
        """Errback path: flush buffers, cancel timers, and fail the Deferred."""
        self._sendBuffers()
        log.msg("RunProcess.failed: command failed: %s" % (why,))
        if self.timer:
            self.timer.cancel()
            self.timer = None
        if self.maxTimer:
            self.maxTimer.cancel()
            self.maxTimer = None
        if self.buftimer:
            self.buftimer.cancel()
            self.buftimer = None
        d = self.deferred
        self.deferred = None
        if d:
            d.errback(why)
        else:
            log.msg("Hey, command %s finished twice" % self)

    def doTimeout(self):
        # no output for self.timeout seconds: kill the command
        self.timer = None
        msg = "command timed out: %d seconds without output" % self.timeout
        self.kill(msg)

    def doMaxTimeout(self):
        # total runtime exceeded self.maxTime: kill the command
        self.maxTimer = None
        msg = "command timed out: %d seconds elapsed" % self.maxTime
        self.kill(msg)

    def kill(self, msg):
        """Kill the child process (and, where possible, its process group).

        @param msg: human-readable reason, sent to the master as a header
        """
        # This may be called by the timeout, or when the user has decided to
        # abort this build.
        self._sendBuffers()
        if self.timer:
            self.timer.cancel()
            self.timer = None
        if self.maxTimer:
            self.maxTimer.cancel()
            self.maxTimer = None
        if self.buftimer:
            self.buftimer.cancel()
            self.buftimer = None
        if hasattr(self.process, "pid") and self.process.pid is not None:
            msg += ", killing pid %s" % self.process.pid
        log.msg(msg)
        self.sendStatus({'header': "\n" + msg + "\n"})

        # let the PP know that we are killing it, so that it can ensure that
        # the exit status comes out right
        self.pp.killed = True

        hit = 0
        if runtime.platformType == "posix":
            try:
                # really want to kill off all child processes too. Process
                # Groups are ideal for this, but that requires
                # spawnProcess(usePTY=1). Try both ways in case process was
                # not started that way.

                # the test suite sets self.KILL=None to tell us we should
                # only pretend to kill the child. This lets us test the
                # backup timer.

                sig = None
                if self.KILL is not None:
                    sig = getattr(signal, "SIG"+ self.KILL, None)

                if self.KILL == None:
                    log.msg("self.KILL==None, only pretending to kill child")
                elif sig is None:
                    log.msg("signal module is missing SIG%s" % self.KILL)
                elif not hasattr(os, "kill"):
                    log.msg("os module is missing the 'kill' function")
                elif not hasattr(self.process, "pid") or self.process.pid is None:
                    log.msg("self.process has no pid")
                else:
                    log.msg("trying os.kill(-pid, %d)" % (sig,))
                    # TODO: maybe use os.killpg instead of a negative pid?
                    os.kill(-self.process.pid, sig)
                    log.msg(" signal %s sent successfully" % sig)
                    hit = 1
            except OSError:
                # probably no-such-process, maybe because there is no process
                # group
                pass
        if not hit:
            # process-group kill failed (or non-posix): fall back to
            # signalling just the one process through twisted
            try:
                if self.KILL is None:
                    log.msg("self.KILL==None, only pretending to kill child")
                else:
                    log.msg("trying process.signalProcess('KILL')")
                    self.process.signalProcess(self.KILL)
                    log.msg(" signal %s sent successfully" % (self.KILL,))
                    hit = 1
            except OSError:
                # could be no-such-process, because they finished very recently
                pass
        if not hit:
            log.msg("signalProcess/os.kill failed both times")

        if runtime.platformType == "posix":
            # we only do this under posix because the win32eventreactor
            # blocks here until the process has terminated, while closing
            # stderr. This is weird.
            self.pp.transport.loseConnection()

        # finished ought to be called momentarily. Just in case it doesn't,
        # set a timer which will abandon the command.
        self.timer = self._reactor.callLater(self.BACKUP_TIMEOUT,
                                             self.doBackupTimeout)

    def doBackupTimeout(self):
        """The kill did not take effect within BACKUP_TIMEOUT: give up."""
        log.msg("we tried to kill the process, and it wouldn't die.."
                " finish anyway")
        self.timer = None
        self.sendStatus({'header': "SIGKILL failed to kill process\n"})
        if self.sendRC:
            self.sendStatus({'header': "using fake rc=-1\n"})
            self.sendStatus({'rc': -1})
        self.failed(RuntimeError("SIGKILL failed to kill process"))

    def writeStdin(self, data):
        """Write data to the child's stdin (buffered until connected)."""
        self.pp.writeStdin(data)

    def closeStdin(self):
        """Close the child's stdin."""
        self.pp.closeStdin()