| |
@@ -4,7 +4,6 @@
|
| |
from subprocess import Popen
|
| |
import time
|
| |
from urlparse import urlparse
|
| |
- import paramiko
|
| |
import glob
|
| |
|
| |
from backend.vm_manage import PUBSUB_INTERRUPT_BUILDER
|
| |
@@ -13,6 +12,7 @@
|
| |
from ..exceptions import BuilderError, BuilderTimeOutError, RemoteCmdError, VmError
|
| |
|
| |
from ..constants import mockchain, rsync, DEF_BUILD_TIMEOUT
|
| |
+ from ..sshcmd import SSHConnectionError, SSHConnection
|
| |
|
| |
import modulemd
|
| |
|
| |
@@ -33,9 +33,8 @@
|
| |
self._remote_basedir = self.opts.remote_basedir
|
| |
self._remote_pkg_path = None
|
| |
|
| |
- # if we're at this point we've connected and done stuff on the host
|
| |
- self.conn = self._create_ssh_conn(username=self.opts.build_user)
|
| |
- self.root_conn = self._create_ssh_conn(username="root")
|
| |
+ self.root_conn = SSHConnection(host=self.hostname, config_file=self.opts.ssh.builder_config)
|
| |
+ self.conn = SSHConnection(user=self.opts.build_user, host=self.hostname, config_file=self.opts.ssh.builder_config)
|
| |
|
| |
self.module_dist_tag = self._load_module_dist_tag()
|
| |
|
| |
@@ -77,13 +76,6 @@
|
| |
def tempdir(self, value):
|
| |
self._remote_tempdir = value
|
| |
|
| |
- def _create_ssh_conn(self, username):
|
| |
- conn = paramiko.SSHClient()
|
| |
- conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())
|
| |
- conn.connect(hostname=self.hostname, port=self.opts.ssh.port,
|
| |
- username=username, key_filename=self.opts.ssh.identity_file)
|
| |
- return conn
|
| |
-
|
| |
def _run_ssh_cmd(self, cmd, as_root=False):
|
| |
"""
|
| |
Executes single shell command remotely
|
| |
@@ -99,17 +91,9 @@
|
| |
|
| |
self.log.info("BUILDER CMD: "+cmd)
|
| |
|
| |
- try:
|
| |
- stdin, stdout, stderr = conn.exec_command(cmd)
|
| |
- except paramiko.SSHException as err:
|
| |
- raise RemoteCmdError("Paramiko failure.",
|
| |
- cmd, -1, as_root, str(err), "(none)")
|
| |
-
|
| |
- rc = stdout.channel.recv_exit_status()
|
| |
- out, err = stdout.read(), stderr.read()
|
| |
-
|
| |
+ rc, out, err = conn.run_expensive(cmd)
|
| |
if rc != 0:
|
| |
- raise RemoteCmdError("Remote command error occurred.",
|
| |
+ raise RemoteCmdError("Error running remote ssh command.",
|
| |
cmd, rc, as_root, err, out)
|
| |
return out, err
|
| |
|
| |
@@ -266,7 +250,17 @@
|
| |
|
| |
buildcmd += self.remote_pkg_path
|
| |
|
| |
- buildcmd_async = '{buildcmd} &>> {livelog} &'.format(
|
| |
+ # To run something on background, we need to:
|
| |
+ # - ignore SIGHUP, 'nohup' is racy -> sighup'ed by ssh before signal
|
| |
+ # handler is actually set by nohup
|
| |
+ # - make sure not to have attached std{out,err} descriptors to the pty
|
| |
+ # provided by 'ssh -t' (redirect or close them!); otherwise 'ssh -t'
|
| |
+ # hangs here till the command finishes, OTOH ...
|
| |
+ # - doing &>{livelog} means that the mockchain command has no terminal
|
| |
+ # on std{out,err}, which means that its output is not line-buffered
|
| |
+ # (which wouldn't make for a very useful live log), so let's use `unbuffer`
|
| |
+ # from expect.rpm to allocate _persistent_ server-side pseudo-terminal
|
| |
+ buildcmd_async = 'trap "" SIGHUP; unbuffer {buildcmd} &>{livelog} &'.format(
|
| |
livelog=self.livelog_name, buildcmd=buildcmd)
|
| |
|
| |
self._run_ssh_cmd(buildcmd_async)
|
| |
@@ -308,14 +302,22 @@
|
| |
try:
|
| |
pidof_cmd = "/usr/bin/pgrep -u {user} {command}".format(
|
| |
user=self.opts.build_user, command="mockchain")
|
| |
- out, err = self._run_ssh_cmd(pidof_cmd)
|
| |
- except RemoteCmdError as err:
|
| |
+ out, _ = self._run_ssh_cmd(pidof_cmd)
|
| |
+ except RemoteCmdError:
|
| |
self.log.info("Build is not running. Continuing...")
|
| |
- return None, None
|
| |
+ return
|
| |
+
|
| |
+ ensure_dir_exists(self.job.results_dir, self.log)
|
| |
+ live_log = os.path.join(self.job.results_dir, 'mockchain-live.log')
|
| |
+
|
| |
+ live_cmd = '/usr/bin/tail -f --pid={pid} {log}'.format(
|
| |
+ pid=out.strip(), log=self.livelog_name)
|
| |
+
|
| |
+ self.log.info("Attaching to live build log: " + live_cmd)
|
| |
+ with open(live_log, 'w') as logfile:
|
| |
+ # Ignore the exit status.
|
| |
+ self.conn.run(live_cmd, stdout=logfile, stderr=logfile)
|
| |
|
| |
- self.log.info("Attaching to live build log...")
|
| |
- return self._run_ssh_cmd('/usr/bin/tail -f --pid={pid} {log}'
|
| |
- .format(pid=out.strip(), log=self.livelog_name))
|
| |
|
| |
def build(self):
|
| |
# make mock config
|
| |
@@ -328,13 +330,10 @@
|
| |
self.run_mockchain_async()
|
| |
|
| |
# attach to building output
|
| |
- stdout, stderr = self.attach_to_build()
|
| |
-
|
| |
- return stdout, stderr
|
| |
+ self.attach_to_build()
|
| |
|
| |
def reattach(self):
|
| |
- stdout, stderr = self.attach_to_build()
|
| |
- return stdout, stderr
|
| |
+ self.attach_to_build()
|
| |
|
| |
def rsync_call(self, source_path, target_path):
|
| |
ensure_dir_exists(target_path, self.log)
|
| |
Why remove this? If we get a job for which a worker is already running, we don't want to spawn another one. It should not normally happen, but it might, so we handle that case like this.