#393 modify the code to enable docker deployment
Closed 6 years ago by lnie. Opened 6 years ago by lnie.
taskotron/ lnie/libtaskotron feature-docker  into  feature/ansiblize

file modified
+28 -9
@@ -16,7 +16,7 @@ 

  from libtaskotron import exceptions as exc

  

  try:

-     from libtaskotron.ext.disposable import vm

+     from libtaskotron.ext.disposable import vm,docker

  except ImportError, e:

      raise exc.TaskotronImportError(e)

  
@@ -54,6 +54,13 @@ 

                    self.task_vm.ipaddr)

  

          return self.task_vm.ipaddr

def _spawn_container(self):
    '''Spawn a docker container that the task will be executed in.

    :return: the SSH port number of the newly created container
    :rtype: int
    '''
    self.task_container = docker.DockerClient()
    self.task_container.create_container(
        uuid=self.arg_data['uuid'],
        artifacts=self.arg_data['artifactsdir'])
    ssh_port = self.task_container.port
    log.info('Running task on a container on port {}.'.format(ssh_port))
    return ssh_port

  

      def _get_client_ipaddr(self):

          '''Get an IP address of the machine the task is going to be executed on.
@@ -62,7 +69,7 @@ 

          '''

          # when running remotely, run directly over ssh, instead of using libvirt

          persistent = False

- 

+         docker = False

          runtask_mode = config.get_config().runtask_mode

          if runtask_mode == config.RuntaskModeName.LOCAL:

              self.run_remotely = False
@@ -84,16 +91,20 @@ 

              log.debug('Forcing remote execution (option --ssh)')

              self.run_remotely = True

              persistent = True

+         elif self.arg_data['docker']:

+             self.run_remotely = True

+             persistent = False

+             docker = True

+             log.debug('Forcing execution on docker (option --docker)')

  

          log.debug('Execution mode: %s', 'remote' if self.run_remotely else 'local')

  

          ipaddr = '127.0.0.1'

-         if self.run_remotely:

+         if self.run_remotely and not docker:

              ipaddr = self.arg_data['machine'] if persistent else None

- 

          return ipaddr

  

-     def _run_ansible_playbook(self, ipaddr):

+     def _run_ansible_playbook(self, inventory):

          '''Run the ansible-playbook command to execute given playbook containing the task.

  

          :param str ipaddr: IP address of the machine the task will be run on
@@ -104,7 +115,7 @@ 

  

          cmd = [

              'ansible-playbook', 'runner.yml',

-             '--inventory=%s,' % ipaddr,

+             '--inventory=%s,' % inventory,

              '-e', 'artifacts=%s' % self.arg_data['artifactsdir'],

              '-e', 'subjects=%s' % self.arg_data['item'],

              '-e', 'taskdir=%s' % taskdir,
@@ -129,11 +140,19 @@ 

          ipaddr = self._get_client_ipaddr()

          if ipaddr is None:

              ipaddr = self._spawn_vm(self.arg_data['uuid'])

- 

-         log.info('Running task on machine %s', ipaddr)

+         if self.arg_data['docker']:

+             port = self._spawn_container()

+             with open(os.path.join('inventory'), 'w') as f:

+                   f.write('localhost ansible_ssh_host=%s ansible_user=root ansible_port=%s ansible_password=passw0rd '

+                          'ansible_ssh_common_args="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"'% (ipaddr, port))

+             log.info('Running task on machine %s', ipaddr)

+         else:

+             with open(os.path.join('inventory'), 'w') as f:

+                 f.write('[localhost]\n%s' % ipaddr)

+             log.info('Running task on machine %s', ipaddr)

  

          try:

-             self._run_ansible_playbook(ipaddr)

+             self._run_ansible_playbook('inventory')

          finally:

              if self.task_vm is not None:

                  self.task_vm.teardown()

@@ -0,0 +1,186 @@ 

+ import os

+ import random

+ import subprocess as sp

+ 

+ from libtaskotron.logger import log

+ 

+ 

class DockerClient(object):
    '''Helper class for working with the docker daemon through the ``docker``
    command line client.

    The client keeps track of the containers and images it creates
    (``owned_containers``, ``owned_images``) so it can clean up after itself
    while co-existing with other users of the same docker daemon.
    '''

    def __init__(self, host_ip="127.0.0.1"):
        ''':param str host_ip: the host IP where the docker daemon runs. We
        assume for now that it's running on localhost.'''
        # FIXME: Support running commands against a remote host.
        self.host = host_ip
        # host port the container's sshd will be published on; may get
        # re-randomized in create_container() if it turns out to be taken
        self.port = random.randrange(2200, 2300)
        self.images = self._get_images()
        self.containers = self._get_containers()
        # We need to keep a list of the containers we create, so we can
        # co-exist with other users of whatever docker daemon we might be
        # interacting with.
        self.owned_containers = []
        self.owned_images = []

    def _get_images(self):
        '''Return a list of image dicts with keys ``id`` and ``name``.'''
        # go template producing one tab-separated line per image
        template = "{{.ID}}\t{{.Repository}}"
        raw_output = sp.check_output(
            ['docker', 'images', '--format', template])
        # the trailing newline yields one empty entry at the end; drop it
        lines = raw_output.split('\n')[:-1]
        attrs = ['id', 'name']
        return [dict(zip(attrs, line.split('\t'))) for line in lines]

    def _get_containers(self):
        '''Return a list of dicts describing all containers on the host,
        with keys ``id``, ``command``, ``status``, ``ports`` and ``names``.'''
        # go template to clean up `docker ps` output
        template = "{{.ID}}\t{{.Command}}\t{{.Status}}\t{{.Ports}}\t{{.Names}}"
        raw_output = sp.check_output(
            ['docker', 'ps', '-a', '--format', template])
        # split rows/columns, dropping the empty entry caused by the
        # trailing newline
        rows = [line.split('\t') for line in raw_output.split('\n')][:-1]
        attrs = ['id', 'command', 'status', 'ports', 'names']
        return [dict(zip(attrs, row)) for row in rows]

    def _find_container(self, search_term):
        '''Find a container based on either name or id.

        :param str search_term: a container name or ID
        :return: the matching container dict
        :raises ClientError: if no container matches
        '''
        for container in self.containers:
            if search_term in container.values():
                return container
        # Bug fix: this used to *return* an error string, which made callers
        # crash later when they indexed into it as if it were a dict.
        raise ClientError("No container found by {}.".format(search_term))

    def _manipulate_container(self, identifier, action=None):
        '''Start, stop or remove a container based on its name or id.

        :param str identifier: a container ID or name
        :param str action: command for docker, either 'start', 'stop', or 'rm'
        :raises ClientError: on an unknown action, an unknown container, or
            if the docker command itself fails
        '''
        if action not in ('start', 'stop', 'rm'):
            raise ClientError("Available actions are: start, stop, and rm. "
                              "Could not perform: {}".format(action))

        container = self._find_container(identifier)
        try:
            sp.check_call(['docker', action, container['names']])
        except sp.CalledProcessError:
            # narrowed from a bare `except:` -- only a failing docker call
            # is a recoverable condition here
            log.debug("Failed to {} the container '{}'.".format(
                action, container['names']))
            raise ClientError("Could not {} the container.".format(action))

    def build_image(self, path_to_dockerfile_dir, imgtag='taskotron-worker'):
        '''Run ``docker build`` against the specified path and record the
        resulting image as owned by us.

        :param str path_to_dockerfile_dir: directory containing the Dockerfile
        :param str imgtag: tag to apply to the built image
        '''
        sp.check_call(['docker', 'build', '-t', imgtag,
                       path_to_dockerfile_dir])
        # refresh our view of the daemon and remember the image for cleanup
        self.images = self._get_images()
        self.owned_images.append(imgtag)

    def create_container(self,
                         image_name='test',
                         container_name='taskotron-worker',
                         uuid=None,
                         artifacts=None):
        '''Create (and start) a container from the specified image, with its
        sshd published on a free host port and the artifacts directory
        mounted at /artifacts.

        :param str image_name: the name of the built image you want to launch
        :param str container_name: base name for the container; *uuid* is
            appended when given so that parallel runs don't clash
        :param str uuid: task run UUID, if any
        :param str artifacts: host path to bind-mount at /artifacts
        '''
        # Build the image if it's not already there
        if any(image_name == image['name'] for image in self.images):
            log.debug("Image already exists, skipping image build...")
        else:
            log.debug("Image not found, building...")
            self.build_image(os.path.join(os.path.abspath(os.curdir),
                                          'libtaskotron/ext/docker/'))

        if uuid:
            container_name += "-{}".format(uuid)

        # Pick a host port that is actually free, to avoid a bind failure
        # when docker publishes the container's sshd. This replaces the
        # previous approach of writing the port to a file and shelling out
        # to `netstat | grep`, which was fragile and left a stray file.
        import socket
        for _ in range(100):
            probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                probe.bind(('127.0.0.1', self.port))
                break  # port is free
            except socket.error:
                self.port = random.randrange(2200, 2300)
            finally:
                probe.close()
        host_port = self.port

        log.info("Container will be available on port: {}".format(host_port))
        # was a bare py2 `print artifacts` debug statement
        log.debug("Artifacts directory: {}".format(artifacts))
        sp.check_call(['docker', 'run', '-d',
                       '--name', container_name,
                       '-p', '{}:22'.format(host_port),
                       '-v', '{}:/artifacts'.format(artifacts),
                       image_name])

        # Update our list of all containers...
        self.containers = self._get_containers()
        # ...and remember this one as ours, for later cleanup
        self.owned_containers.append(self._find_container(container_name))

    def start_container(self, identifier):
        '''Start the container with the given name or id.'''
        self._manipulate_container(identifier, action='start')

    def stop_container(self, identifier):
        '''Stop the container with the given name or id.'''
        self._manipulate_container(identifier, action='stop')

    def rm_container(self, identifier):
        '''Remove the container with the given name or id, dropping it from
        our list of owned containers.'''
        for i, container in enumerate(self.owned_containers):
            # Bug fix: the original condition was
            # `container['names'] or container['id'] == identifier`, which
            # is always true for any container with a non-empty name, so
            # the wrong entry could be popped.
            if identifier in (container['names'], container['id']):
                self.owned_containers.pop(i)
                break

        self._manipulate_container(identifier, action='rm')

    def clear_all(self):
        '''Clean out all owned containers and images.'''
        log.info("Clearing out all our containers...")
        # iterate a copy: rm_container() mutates owned_containers, and
        # mutating a list while iterating it skips elements
        for container in list(self.owned_containers):
            self.stop_container(container['names'])
            self.rm_container(container['names'])

        log.info("Clearing out all our images...")
        for image in self.owned_images:
            sp.check_call(['docker', 'rmi', image])

class ClientError(Exception):
    '''General exceptions we might run into using this client.'''

@@ -0,0 +1,23 @@ 

# Worker image for running taskotron tasks over SSH inside a container.
#FROM registry.fedoraproject.org/fedora:25
FROM fedora:25

RUN dnf install openssh-server PyYAML libtaskotron-core libtaskotron-fedora \
    libtaskotron-config python-solv python-librepo passwd -y && \
    mkdir -p /var/log/taskotron /srv/taskotron/artifacts && \
    dnf clean all

ENV LANG C.utf8
ENV LC_ALL C.utf8

# NOTE: use the /raw/ endpoint -- the original /blob/ URL serves the Pagure
# HTML page for the file, not the config file itself.
ADD https://pagure.io/taskotron/libtaskotron/raw/master/f/conf/yumrepoinfo.conf.example /etc/taskotron/yumrepoinfo.conf
ADD taskotron.yaml /etc/taskotron/taskotron.yaml
ADD namespaces.yaml /etc/taskotron/namespaces.yaml

# Single layer: set a known root password (the runner's inventory uses it to
# ssh in) and pre-generate the host key so sshd can start non-interactively.
RUN echo "passw0rd" | passwd --stdin root && \
    ssh-keygen -t rsa -f /etc/ssh/ssh_host_rsa_key -N ""

EXPOSE 22

CMD ["/sbin/sshd", "-D"]

file modified
+2
@@ -98,6 +98,8 @@ 

                          help="path to private key for remote connections over ssh")

      parser.add_argument("--no-destroy", action="store_true",

                          help="do not destroy disposable client at the end of task execution")

+     parser.add_argument("--docker", action="store_true",

+                         help="Run the task inside a docker container.")

  

      return parser

  

file modified
+4 -3
@@ -1,4 +1,4 @@ 

- - hosts: all

+ - hosts: localhost

    remote_user: root

    vars:

      local: false
@@ -19,7 +19,8 @@ 

          dest: "{{ client_taskdir }}"

  

      - name: create artifacts dir

-       command: mkdir -p "{{ artifacts }}"

+       file:

+         path: "{{ artifacts }}"

        when: not local

  

      - name: Install required packages
@@ -48,6 +49,6 @@ 

      - name: Collect logs

        synchronize:

          mode: pull

-         src: "{{ artifacts }}/*"

+         src: "{{ artifacts }}"

          dest: "{{ artifacts }}"

        when: not local

I have modified roshi's docker code:)
I just wanted to give this a try; lots of follow-up work still needs to be done, that's for sure.
test plan:
git clone https://pagure.io/task-rpmlint-ansible
runtask -d -i gzip-1.8-1.fc25 -t koji_build --docker -a x86_64 task-rpmlint-ansible/run_tests.yml
or git clone https://github.com/stefwalter/gzip-dist-git
runtask -d -i gzip -t koji_build -a x86_64 --docker gzip-dist-git/tests/test_rpm.yml

Hi Lili. Are you saying this is a modification of PR #388? What exactly changed? Have you only rebased it against current feature/ansiblize or performed further modifications?

Also, please note that I'd like to merge feature/ansiblize into develop first, that is our current top priority. Then we can deal with docker.

Hi Lili. Are you saying this is a modification of PR #388? What exactly changed? Have you only >rebased it against current feature/ansiblize or performed further modifications?

I have rebased it against the current 'feature/ansiblize' branch and modified some code according to the comments from https://phab.qa.fedoraproject.org/D1196,
since it seems that roshi doesn't have enough time to do the modification himself.
I have also fixed some potential exceptions, such as port bind errors when taskotron tries to use an occupied port, though I've since found that my solution is not robust enough;
I will update the code when asked.

Also, please note that I'd like to merge feature/ansiblize into develop first, that is our >current top priority. Then we can deal with docker.

Thanks for making that clear — I didn't want to be reckless; I just had some time and tried to help, since there seemed to be some confusion about docker during our last team meeting,
and our team members are busy with all kinds of other important things. I have also just found a few other errors in feature/ansiblize itself (and fixed one in this pull request) —
should I fix them now, or wait for a while?

Thanks for explanation, and the effort. Let's make feature/ansiblize working first, and then we'll look at this. If you see any issues with feature/ansiblize, feel free to submit PRs against it. It's better to submit multiple smaller PRs, each fixing one particular problem, than a single big PR changing too many things at once. Also, if you want recommendations what to work on (not necessarily libtaskotron), I'm sure we can come up with something.

If this is "just" rebase + fixes of Mike's patch, I'd like to see it split into (at least) two commits - one which handles the rebasing, and the second that applies the changes, to preserve Mike's authorship/contribuiton as much as possible.

Thanks for explanation, and the effort. Let's make feature/ansiblize working first, and then we'll >look at this. If you see any issues with feature/ansiblize, feel free to submit PRs against it. It's >better to submit multiple smaller PRs, each fixing one particular problem, than a single big PR >changing too many things at once. Also, if you want recommendations what to work on (not >necessarily libtaskotron), I'm sure we can come up with something.

Sure thing — I'm going to submit smaller ones, though they are just little modifications.

If this is "just" rebase + fixes of Mike's patch, I'd like to see it split into (at least) two commits - >one which handles the rebasing, and the second that applies the changes, to preserve Mike's >authorship/contribuiton as much as possible.

That really makes the point. This is my first pull request, and I'm not really clear on how the process works — as you know, I even asked you yesterday whether there was a way to get code reviewed. Thinking it would cause conflicts if I made a new PR on top of Mike's, I hesitated over whether I should open one while still learning how pull requests work. But I finally clicked the button, since I was 100% sure this wouldn't be merged: for one thing it needs further modification, and for another I simply wanted to make a PR.

Anyway, disregarding the formal issues, this also really needs tests in order to be merged. Please have a look at how our code is tested (we use py.test, and all the tests are located in the /testing directory in the git root), and add reasonable tests covering the added functionality. We can make sure the code is rightfully appropriated at merge-time.

Anyway, disregarding the formal issues, this also really needs tests in order to be merged. >Please have a look at how our code is tested (we use py.test, and all the tests are located in the >/testing directory in the git root), and add reasonable tests covering the added functionality. We >can make sure the code is rightfully appropriated at merge-time.

Got it, and will do.

Pull-Request has been closed by lnie

6 years ago

This has been replaced by #400.