# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members

"""
Support for running 'shell commands'
"""

from __future__ import absolute_import
from __future__ import print_function
from future.builtins import range
from future.utils import PY3
from future.utils import iteritems
from future.utils import string_types
from future.utils import text_type

import os
import pprint
import re
import signal
import stat
import subprocess
import sys
import traceback
from codecs import getincrementaldecoder
from collections import deque
from tempfile import NamedTemporaryFile

from twisted.internet import defer
from twisted.internet import error
from twisted.internet import protocol
from twisted.internet import reactor
from twisted.internet import task
from twisted.python import failure
from twisted.python import log
from twisted.python import runtime
from twisted.python.win32 import quoteArguments

from buildbot_worker import util
from buildbot_worker.compat import bytes2NativeString
from buildbot_worker.compat import bytes2unicode
from buildbot_worker.compat import unicode2bytes
from buildbot_worker.exceptions import AbandonChain

if runtime.platformType == 'posix':
    from twisted.internet.process import Process


def win32_batch_quote(cmd_list, unicode_encoding='utf-8'):
    # Quote cmd_list to a string that is suitable for inclusion in a
    # Windows batch file. This is not quite the same as quoting it for the
    # shell, as cmd.exe doesn't support the %% escape in interactive mode.
    def escape_arg(arg):
        arg = bytes2NativeString(arg, unicode_encoding)
        arg = quoteArguments([arg])
        # escape shell special characters
        arg = re.sub(r'[@()^"<>&|]', r'^\g<0>', arg)
        # prevent variable expansion
        return arg.replace('%', '%%')

    return ' '.join(map(escape_arg, cmd_list))
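
# Illustrative example (not part of the original module): following the rules
# above, win32_batch_quote(['echo', 'a&b', '50%']) yields the batch-safe string
# "echo a^&b 50%%" -- cmd.exe metacharacters are caret-escaped and '%' is
# doubled so the batch interpreter does not expand variables.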


def shell_quote(cmd_list, unicode_encoding='utf-8'):
    # attempt to quote cmd_list such that a shell will properly re-interpret
    # it. The pipes module is only available on UNIX; also, the quote
    # function is undocumented (although it looks like it will be documented
    # soon: http://bugs.python.org/issue9723). Finally, it has a nasty bug
    # in some versions where an empty string is not quoted.
    #
    # So:
    #  - use pipes.quote on UNIX, handling '' as a special case
    #  - use our own custom function on Windows
    if isinstance(cmd_list, bytes):
        cmd_list = bytes2unicode(cmd_list, unicode_encoding)

    if runtime.platformType == 'win32':
        return win32_batch_quote(cmd_list, unicode_encoding)

    # only available on unix
    import pipes  # pylint: disable=import-outside-toplevel

    def quote(e):
        if not e:
            return u'""'
        e = bytes2unicode(e, unicode_encoding)
        return pipes.quote(e)
    return u" ".join([quote(e) for e in cmd_list])
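
# Illustrative example (not part of the original module): on a POSIX worker,
# shell_quote(['echo', 'a b', '']) returns the single string
#     echo 'a b' ""
# since pipes.quote() wraps the argument containing a space in single quotes
# and the empty string is special-cased as '""'.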


class LogFileWatcher(object):
    POLL_INTERVAL = 2

    def __init__(self, command, name, logfile, follow=False, poll=True):
        self.command = command
        self.name = name
        self.logfile = logfile
        decoderFactory = getincrementaldecoder(
            self.command.builder.unicode_encoding)
        self.logDecode = decoderFactory(errors='replace')

        log.msg("LogFileWatcher created to watch {0}".format(logfile))
        # we are created before the ShellCommand starts. If the logfile we're
        # supposed to be watching already exists, record its size and
        # ctime/mtime so we can tell when it starts to change.
        self.old_logfile_stats = self.statFile()
        self.started = False

        # follow the file, only sending back lines
        # added since we started watching
        self.follow = follow

        # every 2 seconds we check on the file again
        self.poller = task.LoopingCall(self.poll) if poll else None

    def start(self):
        self.poller.start(self.POLL_INTERVAL).addErrback(self._cleanupPoll)

    def _cleanupPoll(self, err):
        log.err(err, msg="Polling error")
        self.poller = None

    def stop(self):
        self.poll()
        if self.poller is not None:
            self.poller.stop()
        if self.started:
            self.f.close()

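    # statFile() below returns a (ctime, mtime, size) tuple for the watched
    # file, or None if it does not exist; poll() compares that against the
    # snapshot taken in __init__ to detect when the command starts writing.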
    def statFile(self):
        if os.path.exists(self.logfile):
            s = os.stat(self.logfile)
            return (s[stat.ST_CTIME], s[stat.ST_MTIME], s[stat.ST_SIZE])
        return None

    def poll(self):
        if not self.started:
            s = self.statFile()
            if s == self.old_logfile_stats:
                return  # not started yet
            if not s:
                # the file was there, but now it's deleted. Forget about the
                # initial state, clearly the process has deleted the logfile
                # in preparation for creating a new one.
                self.old_logfile_stats = None
                return  # no file to work with
            self.f = open(self.logfile, "rb")
            # if we only want new lines, seek to
            # where we stat'd so we only find new
            # lines
            if self.follow:
                self.f.seek(s[2], 0)
            self.started = True
        self.f.seek(self.f.tell(), 0)
        while True:
            data = self.f.read(10000)
            if not data:
                return
            decodedData = self.logDecode.decode(data)
            self.command.addLogfile(self.name, decodedData)


if runtime.platformType == 'posix':
    class ProcGroupProcess(Process):

        """Simple subclass of Process to also make the spawned process a process
        group leader, so we can kill all members of the process group."""

        def _setupChild(self, *args, **kwargs):
            Process._setupChild(self, *args, **kwargs)

            # this will cause the child to be the leader of its own process group;
            # it's also spelled setpgrp() on BSD, but this spelling seems to work
            # everywhere
            os.setpgid(0, 0)


class RunProcessPP(protocol.ProcessProtocol):
    debug = False

    def __init__(self, command):
        self.command = command
        self.pending_stdin = b""
        self.stdin_finished = False
        self.killed = False
        decoderFactory = getincrementaldecoder(
            self.command.builder.unicode_encoding)
        self.stdoutDecode = decoderFactory(errors='replace')
        self.stderrDecode = decoderFactory(errors='replace')

    def setStdin(self, data):
        assert not self.connected
        self.pending_stdin = data

    def connectionMade(self):
        if self.debug:
            log.msg("RunProcessPP.connectionMade")

        if self.command.useProcGroup:
            if self.debug:
                log.msg(" recording pid {0} as subprocess pgid".format(
                    self.transport.pid))
            self.transport.pgid = self.transport.pid

        if self.pending_stdin:
            if self.debug:
                log.msg(" writing to stdin")
            self.transport.write(self.pending_stdin)
        if self.debug:
            log.msg(" closing stdin")
        self.transport.closeStdin()

    def outReceived(self, data):
        if self.debug:
            log.msg("RunProcessPP.outReceived")
        decodedData = self.stdoutDecode.decode(data)
        self.command.addStdout(decodedData)

    def errReceived(self, data):
        if self.debug:
            log.msg("RunProcessPP.errReceived")
        decodedData = self.stderrDecode.decode(data)
        self.command.addStderr(decodedData)

    def processEnded(self, status_object):
        if self.debug:
            log.msg("RunProcessPP.processEnded", status_object)
        # status_object is a Failure wrapped around an
        # error.ProcessTerminated or an error.ProcessDone.
        # requires twisted >= 1.0.4 to overcome a bug in process.py
        sig = status_object.value.signal
        rc = status_object.value.exitCode

        # sometimes, even when we kill a process, GetExitCodeProcess will still return
        # a zero exit status. So we force it. See
        # http://stackoverflow.com/questions/2061735/42-passed-to-terminateprocess-sometimes-getexitcodeprocess-returns-0
        if self.killed and rc == 0:
            log.msg(
                "process was killed, but exited with status 0; faking a failure")
            # windows returns '1' even for signalled failures, while POSIX
            # returns -1
            if runtime.platformType == 'win32':
                rc = 1
            else:
                rc = -1
        self.command.finished(sig, rc)


class RunProcess(object):

    """
    This is a helper class, used by worker commands to run programs in a child
    shell.
    """

    BACKUP_TIMEOUT = 5
    interruptSignal = "KILL"
    CHUNK_LIMIT = 128 * 1024

    # Don't send any data until at least BUFFER_SIZE bytes have been collected
    # or BUFFER_TIMEOUT elapsed
    BUFFER_SIZE = 64 * 1024
    BUFFER_TIMEOUT = 5

    # For sending elapsed time:
    startTime = None
    elapsedTime = None

    # For scheduling future events
    _reactor = reactor

    # I wish we had easy access to CLOCK_MONOTONIC in Python:
    # http://www.opengroup.org/onlinepubs/000095399/functions/clock_getres.html
    # Then changes to the system clock during a run wouldn't affect the "elapsed
    # time" results.

    def __init__(self, builder, command,
                 workdir, environ=None,
                 sendStdout=True, sendStderr=True, sendRC=True,
                 timeout=None, maxTime=None, sigtermTime=None,
                 initialStdin=None, keepStdout=False, keepStderr=False,
                 logEnviron=True, logfiles=None, usePTY=False,
                 useProcGroup=True):
        """

        @param keepStdout: if True, we keep a copy of all the stdout text
                           that we've seen. This copy is available in
                           self.stdout, which can be read after the command
                           has finished.
        @param keepStderr: same, for stderr

        @param usePTY: true to use a PTY, false to not use a PTY.

        @param useProcGroup: (default True) use a process group for non-PTY
            process invocations
        """
        if logfiles is None:
            logfiles = {}

        self.builder = builder
        if isinstance(command, list):
            def obfus(w):
                if (isinstance(w, tuple) and len(w) == 3 and
                        w[0] == 'obfuscated'):
                    return util.Obfuscated(w[1], w[2])
                return w
            command = [obfus(w) for w in command]
        # We need to take unicode commands and arguments and encode them using
        # the appropriate encoding for the worker. This is mostly platform
        # specific, but can be overridden in the worker's buildbot.tac file.
        #
        # Encoding the command line here ensures that the called executables
        # receive arguments as bytestrings encoded with an appropriate
        # platform-specific encoding. It also plays nicely with twisted's
        # spawnProcess which checks that arguments are regular strings or
        # unicode strings that can be encoded as ascii (which generates a
        # warning).

        def to_bytes(cmd):
            if isinstance(cmd, (tuple, list)):
                for i, a in enumerate(cmd):
                    if isinstance(a, text_type):
                        cmd[i] = a.encode(self.builder.unicode_encoding)
            elif isinstance(cmd, text_type):
                cmd = cmd.encode(self.builder.unicode_encoding)
            return cmd

        self.command = to_bytes(util.Obfuscated.get_real(command))
        self.fake_command = to_bytes(util.Obfuscated.get_fake(command))

        self.sendStdout = sendStdout
        self.sendStderr = sendStderr
        self.sendRC = sendRC
        self.logfiles = logfiles
        self.workdir = workdir
        self.process = None
        if not os.path.exists(workdir):
            os.makedirs(workdir)
        if environ:
            for key, v in iteritems(environ):
                if isinstance(v, list):
                    # Need to do os.pathsep translation. We could either do that
                    # by replacing all incoming ':'s with os.pathsep, or by
                    # accepting lists. I like lists better.
                    # If it's not a string, treat it as a sequence to be
                    # turned into a string.
                    environ[key] = os.pathsep.join(environ[key])

            if "PYTHONPATH" in environ:
                environ['PYTHONPATH'] += os.pathsep + "${PYTHONPATH}"

            # do substitution on variable values matching pattern: ${name}
            p = re.compile(r'\${([0-9a-zA-Z_]*)}')

            def subst(match):
                return os.environ.get(match.group(1), "")
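
            # Illustrative example (not from the original source): with the
            # pattern above, an incoming value such as "/opt/bin:${PATH}" has
            # "${PATH}" replaced by the worker's own os.environ['PATH'] (or by
            # "" when that variable is unset) before it reaches the child.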
            newenv = {}
            for key in os.environ:
                # setting a key to None will delete it from the worker
                # environment
                if key not in environ or environ[key] is not None:
                    newenv[key] = os.environ[key]
            for key, v in iteritems(environ):
                if v is not None:
                    if not isinstance(v, string_types):
                        raise RuntimeError("'env' values must be strings or "
                                           "lists; key '{0}' is incorrect".format(key))
                    newenv[key] = p.sub(subst, v)

            self.environ = newenv
        else:  # not environ
            self.environ = os.environ.copy()
        self.initialStdin = to_bytes(initialStdin)
        self.logEnviron = logEnviron
        self.timeout = timeout
        self.ioTimeoutTimer = None
        self.sigtermTime = sigtermTime
        self.maxTime = maxTime
        self.maxTimeoutTimer = None
        self.killTimer = None
        self.keepStdout = keepStdout
        self.keepStderr = keepStderr

        self.buffered = deque()
        self.buflen = 0
        self.sendBuffersTimer = None

        assert usePTY in (True, False), \
            "Unexpected usePTY argument value: {!r}. Expected boolean.".format(
                usePTY)
        self.usePTY = usePTY

        # usePTY=True is a convenience for cleaning up all children and
        # grandchildren of a hung command. Fall back to usePTY=False on systems
        # and in situations where ptys cause problems. PTYs are posix-only,
        # and for .closeStdin to matter, we must use a pipe, not a PTY
        if runtime.platformType != "posix" or initialStdin is not None:
            if self.usePTY:
                self.sendStatus(
                    {'header': "WARNING: disabling usePTY for this command"})
            self.usePTY = False

        # use an explicit process group on POSIX, noting that usePTY always implies
        # a process group.
        if runtime.platformType != 'posix':
            useProcGroup = False
        elif self.usePTY:
            useProcGroup = True
        self.useProcGroup = useProcGroup

        self.logFileWatchers = []
        for name, filevalue in self.logfiles.items():
            filename = filevalue
            follow = False

            # check for a dictionary of options
            # filename is required, others are optional
            if isinstance(filevalue, dict):
                filename = filevalue['filename']
                follow = filevalue.get('follow', False)

            w = LogFileWatcher(self, name,
                               os.path.join(self.workdir, filename),
                               follow=follow)
            self.logFileWatchers.append(w)

    def __repr__(self):
        return "<{0} '{1}'>".format(self.__class__.__name__, self.fake_command)

    def sendStatus(self, status):
        self.builder.sendUpdate(status)

    def start(self):
        # return a Deferred which fires (with the exit code) when the command
        # completes
        if self.keepStdout:
            self.stdout = ""
        if self.keepStderr:
            self.stderr = ""
        self.deferred = defer.Deferred()
        try:
            self._startCommand()
        except Exception:
            log.err(failure.Failure(), "error in RunProcess._startCommand")
            self._addToBuffers('stderr', "error in RunProcess._startCommand\n")
            self._addToBuffers('stderr', traceback.format_exc())
            self._sendBuffers()
            # pretend it was a shell error
            self.deferred.errback(AbandonChain(-1))
        return self.deferred

    def _startCommand(self):
        # ensure workdir exists
        if not os.path.isdir(self.workdir):
            os.makedirs(self.workdir)
        log.msg("RunProcess._startCommand")

        self.pp = RunProcessPP(self)

        self.using_comspec = False
        self.command = unicode2bytes(self.command, encoding=self.builder.unicode_encoding)
        if isinstance(self.command, bytes):
            if runtime.platformType == 'win32':
                # allow %COMSPEC% to have args
                argv = os.environ['COMSPEC'].split()
                if '/c' not in argv:
                    argv += ['/c']
                argv += [self.command]
                self.using_comspec = True
            else:
                # for posix, use /bin/sh. for other (non-posix) platforms,
                # well, it doesn't hurt to try
                argv = [b'/bin/sh', b'-c', self.command]
            display = self.fake_command
        else:
            # On windows, CreateProcess requires an absolute path to the executable.
            # When we call spawnProcess below, we pass argv[0] as the executable.
            # So, for .exe's that we have absolute paths to, we can call directly
            # Otherwise, we should run under COMSPEC (usually cmd.exe) to
            # handle path searching, etc.
            if (runtime.platformType == 'win32' and
                    not (bytes2unicode(self.command[0],
                                       self.builder.unicode_encoding).lower().endswith(".exe") and
                         os.path.isabs(self.command[0]))):
                # allow %COMSPEC% to have args
                argv = os.environ['COMSPEC'].split()
                if '/c' not in argv:
                    argv += ['/c']
                argv += list(self.command)
                self.using_comspec = True
            else:
                argv = self.command
            # Attempt to format this for use by a shell, although the process
            # isn't perfect
            display = shell_quote(self.fake_command, self.builder.unicode_encoding)

        display = bytes2unicode(display, self.builder.unicode_encoding)

        # $PWD usually indicates the current directory; spawnProcess may not
        # update this value, though, so we set it explicitly here. This causes
        # weird problems (bug #456) on msys, though..
        if not self.environ.get('MACHTYPE', None) == 'i686-pc-msys':
            self.environ['PWD'] = os.path.abspath(self.workdir)

        # self.stdin is handled in RunProcessPP.connectionMade

        log.msg(u" " + display)
        self._addToBuffers(u'header', display + u"\n")

        # then comes the secondary information
        msg = u" in dir {0}".format(self.workdir)
        if self.timeout:
            if self.timeout == 1:
                unit = u"sec"
            else:
                unit = u"secs"
            msg += u" (timeout {0} {1})".format(self.timeout, unit)
        if self.maxTime:
            if self.maxTime == 1:
                unit = u"sec"
            else:
                unit = u"secs"
            msg += u" (maxTime {0} {1})".format(self.maxTime, unit)
        log.msg(u" " + msg)
        self._addToBuffers(u'header', msg + u"\n")

        msg = " watching logfiles {0}".format(self.logfiles)
        log.msg(" " + msg)
        self._addToBuffers('header', msg + u"\n")

        # then the obfuscated command array, to resolve any ambiguity
        msg = u" argv: {0}".format(self.fake_command)
        log.msg(u" " + msg)
        self._addToBuffers('header', msg + u"\n")

        # then the environment, since it sometimes causes problems
        if self.logEnviron:
            msg = u" environment:\n"
            env_names = sorted(self.environ.keys())
            for name in env_names:
                msg += u" {0}={1}\n".format(bytes2unicode(name,
                                                          encoding=self.builder.unicode_encoding),
                                            bytes2unicode(self.environ[name],
                                                          encoding=self.builder.unicode_encoding))
            log.msg(u" environment:\n{0}".format(pprint.pformat(self.environ)))
            self._addToBuffers(u'header', msg)

        if self.initialStdin:
            msg = u" writing {0} bytes to stdin".format(len(self.initialStdin))
            log.msg(u" " + msg)
            self._addToBuffers(u'header', msg + u"\n")

        msg = u" using PTY: {0}".format(bool(self.usePTY))
        log.msg(u" " + msg)
        self._addToBuffers(u'header', msg + u"\n")

        # put data into stdin and close it, if necessary. This will be
        # buffered until connectionMade is called
        if self.initialStdin:
            self.pp.setStdin(self.initialStdin)

        self.startTime = util.now(self._reactor)

        # start the process

        self.process = self._spawnProcess(
            self.pp, argv[0], argv,
            self.environ,
            self.workdir,
            usePTY=self.usePTY)

        # set up timeouts

        if self.timeout:
            self.ioTimeoutTimer = self._reactor.callLater(
                self.timeout, self.doTimeout)

        if self.maxTime:
            self.maxTimeoutTimer = self._reactor.callLater(
                self.maxTime, self.doMaxTimeout)

        for w in self.logFileWatchers:
            w.start()

    def _spawnProcess(self, processProtocol, executable, args=(), env=None,
                      path=None, uid=None, gid=None, usePTY=False, childFDs=None):
        """private implementation of reactor.spawnProcess, to allow use of
        L{ProcGroupProcess}"""
        if env is None:
            env = {}

        # use the ProcGroupProcess class, if available
        if runtime.platformType == 'posix':
            if self.useProcGroup and not usePTY:
                return ProcGroupProcess(reactor, executable, args, env, path,
                                        processProtocol, uid, gid, childFDs)

        # fall back
        if self.using_comspec:
            return self._spawnAsBatch(processProtocol, executable, args, env,
                                      path, usePTY=usePTY)
        return reactor.spawnProcess(processProtocol, executable, args, env,
                                    path, usePTY=usePTY)

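    # Illustrative note (not from the original source): for a command list such
    # as ['echo', 'hello'], _spawnAsBatch() below writes a temporary .bat file
    # holding "@echo off" plus the batch-quoted command line, runs it via
    # %COMSPEC% /c, and unlinks the file once the command's Deferred fires.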
    def _spawnAsBatch(self, processProtocol, executable, args, env,
                      path, usePTY):
        """A cheat that routes around the impedance mismatch between
        twisted and cmd.exe with respect to escaping quotes"""

        # NamedTemporaryFile differs in PY2 and PY3.
        # In PY2, it needs encoded str and its encoding cannot be specified.
        # In PY3, it needs str which is unicode and its encoding can be specified.
        if PY3:
            tf = NamedTemporaryFile(mode='w+', dir='.', suffix=".bat",
                                    delete=False, encoding=self.builder.unicode_encoding)
        else:
            tf = NamedTemporaryFile(mode='w+', dir='.', suffix=".bat",
                                    delete=False)

        # echo off hides this cheat from the log files.
        tf.write(u"@echo off\n")
        if isinstance(self.command, (string_types, bytes)):
            tf.write(bytes2NativeString(self.command, self.builder.unicode_encoding))
        else:
            tf.write(win32_batch_quote(self.command, self.builder.unicode_encoding))
        tf.close()

        argv = os.environ['COMSPEC'].split()  # allow %COMSPEC% to have args
        if '/c' not in argv:
            argv += ['/c']
        argv += [tf.name]

        def unlink_temp(result):
            os.unlink(tf.name)
            return result
        self.deferred.addBoth(unlink_temp)

        return reactor.spawnProcess(processProtocol, executable, argv, env,
                                    path, usePTY=usePTY)

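    # Worked example (illustrative, not from the original source): with the
    # 128 KiB CHUNK_LIMIT above, a 300000-byte string is yielded by
    # _chunkForSend() as three chunks of 131072, 131072 and 37856 bytes, which
    # keeps each Perspective Broker message well under its 640k string limit.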
    def _chunkForSend(self, data):
        """
        limit the chunks that we send over PB to 128k, since it has a hardwired
        string-size limit of 640k.
        """
        LIMIT = self.CHUNK_LIMIT
        for i in range(0, len(data), LIMIT):
            yield data[i:i + LIMIT]

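    # Illustrative example (not from the original source): _collapseMsg() turns
    # a buffered dict such as {'stdout': [u'a', u'b']} into {'stdout': u'ab'},
    # and a logfile entry {('log', 'test.log'): [u'x']} into
    # {'log': ('test.log', u'x')}.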
    def _collapseMsg(self, msg):
        """
        Take msg, which is a dictionary of lists of output chunks, and
        concatenate all the chunks into a single string
        """
        retval = {}
        for logname in msg:
            data = u""
            for m in msg[logname]:
                m = bytes2unicode(m, self.builder.unicode_encoding)
                data += m
            if isinstance(logname, tuple) and logname[0] == 'log':
                retval['log'] = (logname[1], data)
            else:
                retval[logname] = data
        return retval

    def _sendMessage(self, msg):
        """
        Collapse and send msg to the master
        """
        if not msg:
            return
        msg = self._collapseMsg(msg)
        self.sendStatus(msg)

    def _bufferTimeout(self):
        self.sendBuffersTimer = None
        self._sendBuffers()

    def _sendBuffers(self):
        """
        Send all the content in our buffers.
        """
        msg = {}
        msg_size = 0
        lastlog = None
        logdata = []
        while self.buffered:
            # Grab the next bits from the buffer
            logname, data = self.buffered.popleft()

            # If this log is different than the last one, then we have to send
            # out the message so far. This is because the message is
            # transferred as a dictionary, which makes the ordering of keys
            # unspecified, and makes it impossible to interleave data from
            # different logs. A future enhancement could be to change the
            # master to support a list of (logname, data) tuples instead of a
            # dictionary.
            # On our first pass through this loop lastlog is None
            if lastlog is None:
                lastlog = logname
            elif logname != lastlog:
                self._sendMessage(msg)
                msg = {}
                msg_size = 0
                lastlog = logname

            logdata = msg.setdefault(logname, [])

            # Chunkify the log data to make sure we're not sending more than
            # CHUNK_LIMIT at a time
            for chunk in self._chunkForSend(data):
                if not chunk:
                    continue
                logdata.append(chunk)
                msg_size += len(chunk)
                if msg_size >= self.CHUNK_LIMIT:
                    # We've gone beyond the chunk limit, so send out our
                    # message. At worst this results in a message slightly
                    # larger than (2*CHUNK_LIMIT)-1
                    self._sendMessage(msg)
                    msg = {}
                    logdata = msg.setdefault(logname, [])
                    msg_size = 0
        self.buflen = 0
        if logdata:
            self._sendMessage(msg)
        if self.sendBuffersTimer:
            if self.sendBuffersTimer.active():
                self.sendBuffersTimer.cancel()
            self.sendBuffersTimer = None

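    # Note on the buffering policy implemented below: output accumulates in
    # self.buffered until either more than BUFFER_SIZE (64 KiB) is pending, in
    # which case _addToBuffers() flushes immediately, or BUFFER_TIMEOUT (5
    # seconds) elapses and the timer fires _bufferTimeout().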
    def _addToBuffers(self, logname, data):
        """
        Add data to the buffer for logname
        Start a timer to send the buffers if BUFFER_TIMEOUT elapses.
        If adding data causes the buffer size to grow beyond BUFFER_SIZE, then
        the buffers will be sent.
        """
        n = len(data)

        self.buflen += n
        self.buffered.append((logname, data))
        if self.buflen > self.BUFFER_SIZE:
            self._sendBuffers()
        elif not self.sendBuffersTimer:
            self.sendBuffersTimer = self._reactor.callLater(
                self.BUFFER_TIMEOUT, self._bufferTimeout)

    def addStdout(self, data):
        if self.sendStdout:
            self._addToBuffers('stdout', data)

        if self.keepStdout:
            self.stdout += data
        if self.ioTimeoutTimer:
            self.ioTimeoutTimer.reset(self.timeout)

    def addStderr(self, data):
        if self.sendStderr:
            self._addToBuffers('stderr', data)

        if self.keepStderr:
            self.stderr += data
        if self.ioTimeoutTimer:
            self.ioTimeoutTimer.reset(self.timeout)

    def addLogfile(self, name, data):
        self._addToBuffers(('log', name), data)

        if self.ioTimeoutTimer:
            self.ioTimeoutTimer.reset(self.timeout)

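    # finished() is invoked by RunProcessPP.processEnded with the signal (if
    # any) and exit code of the child; a signalled exit is reported to the
    # master as rc=-1 after the logfile watchers and output buffers are flushed.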
    def finished(self, sig, rc):
        self.elapsedTime = util.now(self._reactor) - self.startTime
        log.msg("command finished with signal {0}, exit code {1}, elapsedTime: {2:0.6f}".format(
                sig, rc, self.elapsedTime))
        for w in self.logFileWatchers:
            # this will send the final updates
            w.stop()
        self._sendBuffers()
        if sig is not None:
            rc = -1
        if self.sendRC:
            if sig is not None:
                self.sendStatus(
                    {'header': "process killed by signal {0}\n".format(sig)})
            self.sendStatus({'rc': rc})
        self.sendStatus({'header': "elapsedTime={0:0.6f}\n".format(self.elapsedTime)})
        self._cancelTimers()
        d = self.deferred
        self.deferred = None
        if d:
            d.callback(rc)
        else:
            log.msg("Hey, command {0} finished twice".format(self))

    def failed(self, why):
        self._sendBuffers()
        log.msg("RunProcess.failed: command failed: {0}".format(why))
        self._cancelTimers()
        d = self.deferred
        self.deferred = None
        if d:
            d.errback(why)
        else:
            log.msg("Hey, command {0} finished twice".format(self))

    def doTimeout(self):
        self.ioTimeoutTimer = None
        msg = (
            "command timed out: {0} seconds without output running {1}".format(
                self.timeout, self.fake_command))
        self.kill(msg)

    def doMaxTimeout(self):
        self.maxTimeoutTimer = None
        msg = "command timed out: {0} seconds elapsed running {1}".format(
            self.maxTime, self.fake_command)
        self.kill(msg)

    def isDead(self):
        if self.process.pid is None:
            return True
        pid = int(self.process.pid)
        try:
            os.kill(pid, 0)
        except OSError:
            return True  # dead
        return False  # alive

    def checkProcess(self):
        self.sigtermTimer = None
        if not self.isDead():
            hit = self.sendSig(self.interruptSignal)
        else:
            hit = 1
        self.cleanUp(hit)

    def cleanUp(self, hit):
        if not hit:
            log.msg("signalProcess/os.kill failed both times")

        if runtime.platformType == "posix":
            # we only do this under posix because the win32eventreactor
            # blocks here until the process has terminated, while closing
            # stderr. This is weird.
            self.pp.transport.loseConnection()

        if self.deferred:
            # finished ought to be called momentarily. Just in case it doesn't,
            # set a timer which will abandon the command.
            self.killTimer = self._reactor.callLater(self.BACKUP_TIMEOUT,
                                                     self.doBackupTimeout)

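    # sendSig() tries, in order, os.killpg() on the recorded process group
    # under POSIX, TASKKILL on win32, and finally Twisted's signalProcess();
    # it returns 1 as soon as one mechanism succeeds and 0 if none of them
    # could reach the process.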
    def sendSig(self, interruptSignal):
        hit = 0
        # try signalling the process group
        if not hit and self.useProcGroup and runtime.platformType == "posix":
            sig = getattr(signal, "SIG" + interruptSignal, None)

            if sig is None:
                log.msg("signal module is missing SIG{0}".format(interruptSignal))
            elif not hasattr(os, "kill"):
                log.msg("os module is missing the 'kill' function")
            elif self.process.pgid is None:
                log.msg("self.process has no pgid")
            else:
                log.msg("trying to kill process group {0}".format(
                    self.process.pgid))
                try:
                    os.killpg(self.process.pgid, sig)
                    log.msg(" signal {0} sent successfully".format(sig))
                    self.process.pgid = None
                    hit = 1
                except OSError:
                    log.msg('failed to kill process group (ignored): {0}'.format(
                        (sys.exc_info()[1])))
                    # probably no-such-process, maybe because there is no process
                    # group

        elif runtime.platformType == "win32":
            if interruptSignal is None:
                log.msg("interruptSignal==None, only pretending to kill child")
            elif self.process.pid is not None:
                if interruptSignal == "TERM":
                    log.msg("using TASKKILL PID /T to kill pid {0}".format(
                        self.process.pid))
                    subprocess.check_call(
                        "TASKKILL /PID {0} /T".format(self.process.pid))
                    log.msg("taskkill'd pid {0}".format(self.process.pid))
                    hit = 1
                elif interruptSignal == "KILL":
                    log.msg("using TASKKILL PID /F /T to kill pid {0}".format(
                        self.process.pid))
                    subprocess.check_call(
                        "TASKKILL /F /PID {0} /T".format(self.process.pid))
                    log.msg("taskkill'd pid {0}".format(self.process.pid))
                    hit = 1

        # try signalling the process itself (works on Windows too, sorta)
        if not hit:
            try:
                log.msg("trying process.signalProcess('{0}')".format(
                    interruptSignal))
                self.process.signalProcess(interruptSignal)
                log.msg(" signal {0} sent successfully".format(interruptSignal))
                hit = 1
            except OSError:
                log.err("from process.signalProcess:")
                # could be no-such-process, because they finished very recently
            except error.ProcessExitedAlready:
                log.msg("Process exited already - can't kill")
                # the process has already exited, and likely finished() has
                # been called already or will be called shortly

        return hit

    def kill(self, msg):
        # This may be called by the timeout, or when the user has decided to
        # abort this build.
        self._sendBuffers()
        self._cancelTimers()
        msg += ", attempting to kill"
        log.msg(msg)
        self.sendStatus({'header': "\n" + msg + "\n"})

        # let the PP know that we are killing it, so that it can ensure that
        # the exit status comes out right
        self.pp.killed = True

        sendSigterm = self.sigtermTime is not None
        if sendSigterm:
            self.sendSig("TERM")
            self.sigtermTimer = self._reactor.callLater(
                self.sigtermTime, self.checkProcess)
        else:
            hit = self.sendSig(self.interruptSignal)
            self.cleanUp(hit)

    def doBackupTimeout(self):
        log.msg("we tried to kill the process, and it wouldn't die.."
                " finish anyway")
        self.killTimer = None
        signalName = "SIG" + self.interruptSignal
        self.sendStatus({'header': signalName + " failed to kill process\n"})
        if self.sendRC:
            self.sendStatus({'header': "using fake rc=-1\n"})
            self.sendStatus({'rc': -1})
        self.failed(RuntimeError(signalName + " failed to kill process"))

    def _cancelTimers(self):
        for timerName in ('ioTimeoutTimer', 'killTimer', 'maxTimeoutTimer',
                          'sendBuffersTimer', 'sigtermTimer'):
            timer = getattr(self, timerName, None)
            if timer:
                timer.cancel()
                setattr(self, timerName, None)