#330 [backend] resolving pylint warnings - logging format interpolation and singleton comparison
Merged 5 years ago by frostyx. Opened 5 years ago by pjunak.
copr/ pjunak/copr pylint2  into  master

file modified
+40 -40
@@ -70,8 +70,8 @@ 

  

          done_count = 0

          for chroot in chroots:

-             self.log.info("Creating repo for: {}/{}/{}"

-                           .format(username, projectname, chroot))

+             self.log.info("Creating repo for: %s/%s/%s",

+                           username, projectname, chroot)

  

              path = self.get_chroot_result_dir(chroot, projectname, username)

  
@@ -87,8 +87,8 @@ 

                      return

  

              except CreateRepoError:

-                 self.log.exception("Error making local repo for: {}/{}/{}"

-                                    .format(username, projectname, chroot))

+                 self.log.exception("Error making local repo for: %s/%s/%s",

+                                    username, projectname, chroot)

  

          if done_count == len(chroots):

              result.result = ActionResult.SUCCESS
@@ -118,14 +118,14 @@ 

  

      def handle_fork(self, result):

          sign = self.opts.do_sign

-         self.log.info("Action fork {}".format(self.data["object_type"]))

+         self.log.info("Action fork %s", self.data["object_type"])

          data = json.loads(self.data["data"])

          old_path = os.path.join(self.destdir, self.data["old_value"])

          new_path = os.path.join(self.destdir, self.data["new_value"])

          builds_map = json.loads(self.data["data"])["builds_map"]

  

          if not os.path.exists(old_path):

-             self.log.info("Source copr directory doesn't exist: {}".format(old_path))

+             self.log.info("Source copr directory doesn't exist: %s", old_path)

              result.result = ActionResult.FAILURE

              return

  
@@ -167,7 +167,7 @@ 

                  if sign:

                      sign_rpms_in_dir(data["user"], data["copr"], dst_path, opts=self.opts, log=self.log)

  

-                 self.log.info("Forked build {} as {}".format(src_path, dst_path))

+                 self.log.info("Forked build %s as %s", src_path, dst_path)

  

              for chroot_path in chroot_paths:

                  createrepo(path=chroot_path, front_url=self.front_url,
@@ -188,7 +188,7 @@ 

          project = self.data["old_value"]

          path = os.path.normpath(self.destdir + '/' + project)

          if os.path.exists(path):

-             self.log.info("Removing copr {0}".format(path))

+             self.log.info("Removing copr %s", path)

              shutil.rmtree(path)

  

      def handle_comps_update(self, result):
@@ -209,16 +209,16 @@ 

          result.result = ActionResult.SUCCESS

          if not ext_data.get("comps_present", True):

              silent_remove(local_comps_path)

-             self.log.info("deleted comps.xml for {}/{}/{} from {} "

-                           .format(ownername, projectname, chroot, remote_comps_url))

+             self.log.info("deleted comps.xml for %s/%s/%s from %s ",

+                           ownername, projectname, chroot, remote_comps_url)

          else:

              try:

                  urlretrieve(remote_comps_url, local_comps_path)

-                 self.log.info("saved comps.xml for {}/{}/{} from {} "

-                               .format(ownername, projectname, chroot, remote_comps_url))

+                 self.log.info("saved comps.xml for %s/%s/%s from %s ",

+                               ownername, projectname, chroot, remote_comps_url)

              except Exception:

-                 self.log.exception("Failed to update comps from {} at location {}"

-                                    .format(remote_comps_url, local_comps_path))

+                 self.log.exception("Failed to update comps from %s at location %s",

+                                    remote_comps_url, local_comps_path)

                  result.result = ActionResult.FAILURE

  

      def handle_module_md_update(self, result):
@@ -238,16 +238,16 @@ 

          result.result = ActionResult.SUCCESS

          if not ext_data.get("module_md_present", True):

              silent_remove(local_module_md_path)

-             self.log.info("deleted module_md.yaml for {}/{}/{} from {} "

-                           .format(ownername, projectname, chroot, remote_module_md_url))

+             self.log.info("deleted module_md.yaml for %s/%s/%s from %s ",

+                           ownername, projectname, chroot, remote_module_md_url)

          else:

              try:

                  urlretrieve(remote_module_md_url, local_module_md_path)

-                 self.log.info("saved module_md.yaml for {}/{}/{} from {} "

-                               .format(ownername, projectname, chroot, remote_module_md_url))

+                 self.log.info("saved module_md.yaml for %s/%s/%s from %s ",

+                               ownername, projectname, chroot, remote_module_md_url)

              except Exception:

-                 self.log.exception("Failed to update module_md from {} at location {}"

-                                    .format(remote_module_md_url, local_module_md_path))

+                 self.log.exception("Failed to update module_md from %s at location %s",

+                                    remote_module_md_url, local_module_md_path)

                  result.result = ActionResult.FAILURE

  

      def handle_delete_build(self):
@@ -258,7 +258,7 @@ 

          projectname = ext_data["projectname"]

          chroot_builddirs = ext_data["chroot_builddirs"]

  

-         self.log.info("Going to delete: {}".format(chroot_builddirs))

+         self.log.info("Going to delete: %s", chroot_builddirs)

  

          for chroot, builddir in chroot_builddirs.items():

              if not builddir:
@@ -268,13 +268,13 @@ 

              builddir_path = os.path.join(chroot_path, builddir)

  

              if not os.path.isdir(builddir_path):

-                 self.log.error("{0} not found".format(builddir_path))

+                 self.log.error("%s not found", builddir_path)

                  continue

  

-             self.log.debug("builddir to be deleted {0}".format(builddir_path))

+             self.log.debug("builddir to be deleted %s", builddir_path)

              shutil.rmtree(builddir_path)

  

-             self.log.debug("Running createrepo on {0}".format(chroot_path))

+             self.log.debug("Running createrepo on %s", chroot_path)

              result_base_url = "/".join(

                  [self.results_root_url, ownername, projectname, chroot])

  
@@ -285,13 +285,13 @@ 

                      username=ownername, projectname=projectname)

              except CoprRequestException:

                  # FIXME: dirty hack to catch the case when createrepo invoked upon a deleted project

-                 self.log.exception("Project {0}/{1} has been deleted on frontend".format(ownername, projectname))

+                 self.log.exception("Project %s/%s has been deleted on frontend", ownername, projectname)

              except CreateRepoError:

-                 self.log.exception("Error making local repo: {}".format(full_path))

+                 self.log.exception("Error making local repo: %s", full_path)

  

      def handle_generate_gpg_key(self, result):

          ext_data = json.loads(self.data["data"])

-         self.log.info("Action generate gpg key: {}".format(ext_data))

+         self.log.info("Action generate gpg key: %s", ext_data)

  

          username = ext_data["username"]

          projectname = ext_data["projectname"]
@@ -315,14 +315,14 @@ 

          try:

              chrootdir = os.path.join(self.opts.destdir, data["user"], data["copr"], data["dest_chroot"])

              if not os.path.exists(chrootdir):

-                 self.log.debug("Create directory: {}".format(chrootdir))

+                 self.log.debug("Create directory: %s", chrootdir)

                  os.makedirs(chrootdir)

  

              for build in data["builds"]:

                  srcdir = os.path.join(self.opts.destdir, data["user"], data["copr"], data["rawhide_chroot"], build)

                  if os.path.exists(srcdir):

                      destdir = os.path.join(chrootdir, build)

-                     self.log.debug("Copy directory: {} as {}".format(srcdir, destdir))

+                     self.log.debug("Copy directory: %s as %s", srcdir, destdir)

                      shutil.copytree(srcdir, destdir)

  

                      with open(os.path.join(destdir, "build.info"), "a") as f:
@@ -344,9 +344,9 @@ 

          vmm = VmManager(self.opts)

          vmd = vmm.get_vm_by_task_id(task_id)

          if vmd:

-             self.log.info("Found VM {0} for task {1}".format(vmd.vm_ip, task_id))

+             self.log.info("Found VM %s for task %s", vmd.vm_ip, task_id)

          else:

-             self.log.error("No VM found for task {0}".format(task_id))

+             self.log.error("No VM found for task %s", task_id)

              result.result = ActionResult.FAILURE

              return

  
@@ -360,7 +360,7 @@ 

          try:

              rc, out, err = conn.run_expensive(cmd)

          except SSHConnectionError:

-             self.log.exception("Error running cmd: {}".format(cmd))

+             self.log.exception("Error running cmd: %s", cmd)

              result.result = ActionResult.FAILURE

              return

  
@@ -373,7 +373,7 @@ 

          try:

              pid = int(out.strip())

          except ValueError:

-             self.log.exception("Invalid pid {} received".format(out))

+             self.log.exception("Invalid pid %s received", out)

              result.result = ActionResult.FAILURE

              return

  
@@ -381,7 +381,7 @@ 

          try:

              rc, out, err = conn.run_expensive(cmd)

          except SSHConnectionError:

-             self.log.exception("Error running cmd: {}".format(cmd))

+             self.log.exception("Error running cmd: %s", cmd)

              result.result = ActionResult.FAILURE

              return

  
@@ -410,7 +410,7 @@ 

                  destdir = os.path.join(project_path, "modules", module_relpath)

  

                  if os.path.exists(destdir):

-                     self.log.warning("Module {0} already exists. Omitting.".format(destdir))

+                     self.log.warning("Module %s already exists. Omitting.", destdir)

                  else:

                      # We want to copy just the particular module builds

                      # into the module destdir, not the whole chroot
@@ -419,15 +419,15 @@ 

                      dirs = [d for d in os.listdir(srcdir) if d.startswith(tuple(prefixes))]

                      for folder in dirs:

                          shutil.copytree(os.path.join(srcdir, folder), os.path.join(destdir, folder))

-                         self.log.info("Copy directory: {} as {}".format(

-                             os.path.join(srcdir, folder), os.path.join(destdir, folder)))

+                         self.log.info("Copy directory: %s as %s",

+                                       os.path.join(srcdir, folder), os.path.join(destdir, folder))

  

                          for f in os.listdir(os.path.join(destdir, folder)):

                              if not f.endswith(".rpm") or f.endswith(".src.rpm"):

                                  continue

                              mmd.artifacts.rpms.add(str(f.rstrip(".rpm")))

  

-                     self.log.info("Module artifacts: {}".format(mmd.artifacts.rpms))

+                     self.log.info("Module artifacts: %s", mmd.artifacts.rpms)

                      modulemd.dump_all(os.path.join(destdir, "modules.yaml"), [mmd])

                      createrepo(path=destdir, front_url=self.front_url,

                                 username=ownername, projectname=projectname,
@@ -440,7 +440,7 @@ 

  

      def run(self):

          """ Handle action (other then builds) - like rename or delete of project """

-         self.log.info("Executing: {}".format(self))

+         self.log.info("Executing: %s", self)

  

          result = Munch()

          result.id = self.data["id"]
@@ -485,7 +485,7 @@ 

          elif action_type == ActionType.CANCEL_BUILD:

              self.handle_cancel_build(result)

  

-         self.log.info("Action result: {}".format(result))

+         self.log.info("Action result: %s", result)

  

          if "result" in result:

              if result.result == ActionResult.SUCCESS and \

@@ -48,13 +48,13 @@ 

                          auth=("user", self.opts.frontend_auth))

                  action_task = r.json()

              except (RequestException, ValueError) as error:

-                 self.log.exception("Retrieving an action task from {} failed with error: {}"

-                                    .format(self.opts.frontend_base_url, error))

+                 self.log.exception("Retrieving an action task from %s failed with error: %s",

+                                    self.opts.frontend_base_url, error)

              finally:

                  if not action_task:

                      time.sleep(self.opts.sleeptime)

  

-         self.log.info("Got new action_task {} of type {}".format(action_task['id'], action_task['action_type']))

+         self.log.info("Got new action_task %s of type %s", action_task['id'], action_task['action_type'])

          return Action(self.opts, action_task, frontend_client=self.frontend_client)

  

      def run(self):

@@ -52,7 +52,7 @@ 

          Starts backend process. Control sub process start/stop.

          """

          self.update_conf()

-         self.log.info("Initial config: {}".format(self.opts))

+         self.log.info("Initial config: %s", self.opts)

  

          try:

              self.log.info("Rescheduling old unfinished builds")

@@ -63,9 +63,9 @@ 

  

              for arch in group["archs"]:

                  self.arch_to_groups[arch].append(group_id)

-                 self.log.debug("mapping {0} to {1} group".format(arch, group_id))

+                 self.log.debug("mapping %s to %s group", arch, group_id)

  

-             self.log.debug("user might use only {0}VMs for {1} group".format(group["max_vm_per_user"], group_id))

+             self.log.debug("user might use only %sVMs for %s group", group["max_vm_per_user"], group_id)

              self.group_to_usermax[group_id] = group["max_vm_per_user"]

  

      def load_jobs(self):
@@ -84,13 +84,13 @@ 

                              auth=("user", self.opts.frontend_auth)).json()

  

              except (RequestException, ValueError) as error:

-                 self.log.exception("Retrieving build jobs from {} failed with error: {}"

-                                    .format(self.opts.frontend_base_url, error))

+                 self.log.exception("Retrieving build jobs from %s failed with error: %s",

+                                    self.opts.frontend_base_url, error)

              finally:

                  if not tasks:

                      time.sleep(self.opts.sleeptime)

  

-         self.log.info("Got new build jobs: {}".format([task.get("task_id") for task in tasks]))

+         self.log.info("Got new build jobs: %s", [task.get("task_id") for task in tasks])

          return [BuildJob(task, self.opts) for task in tasks]

  

      def can_build_start(self, job):
@@ -108,11 +108,11 @@ 

              job.status = BuildStatus.STARTING

              can_build_start = self.frontend_client.starting_build(job.to_dict())

          except (RequestException, ValueError) as error:

-             self.log.exception("Communication with Frontend to confirm build start failed with error: {}".format(error))

+             self.log.exception("Communication with Frontend to confirm build start failed with error: %s", error)

              return False

  

          if not can_build_start:

-             self.log.exception("Frontend forbade to start the job {}".format(job.task_id))

+             self.log.exception("Frontend forbade to start the job %s", job.task_id)

  

          return can_build_start

  
@@ -121,8 +121,8 @@ 

              if not worker.is_alive():

                  worker.join(5)

                  self.workers.remove(worker)

-                 self.log.info("Removed finished worker {} for job {}"

-                               .format(worker.worker_id, worker.job.task_id))

+                 self.log.info("Removed finished worker %s for job %s",

+                               worker.worker_id, worker.job.task_id)

  

      def start_worker(self, vm, job, reattach=False):

          worker = Worker(
@@ -152,8 +152,8 @@ 

                  # first check if we do not have

                  # worker already running for the job

                  if any([job.task_id == w.job.task_id for w in self.workers]):

-                     self.log.warning("Skipping already running task '{}'"

-                                      .format(job.task_id))

+                     self.log.warning("Skipping already running task '%s'",

+                                      job.task_id)

                      continue

  

                  # now search db builder records for the job and
@@ -164,30 +164,30 @@ 

                      worker = self.start_worker(vm, job, reattach=True)

                      worker.mark_running(job)

                      vm.store_field(self.vm_manager.rc, "used_by_worker", worker.worker_id)

-                     self.log.info("Reattached new worker {} for job {}"

-                                   .format(worker.worker_id, worker.job.task_id))

+                     self.log.info("Reattached new worker %s for job %s",

+                                   worker.worker_id, worker.job.task_id)

                      continue

  

                  # ... and if the task is new to us,

                  # allocate new vm and run full build

                  try:

                      vm_group_ids = self.get_vm_group_ids(job.arch)

-                     self.log.info("Picking VM from groups {} for job {}".format(vm_group_ids, job))

+                     self.log.info("Picking VM from groups %s for job %s", vm_group_ids, job)

                      vm = self.vm_manager.acquire_vm(vm_group_ids, job.project_owner, self.next_worker_id,

                                                      job.task_id, job.build_id, job.chroot)

                  except NoVmAvailable as error:

-                     self.log.info("No available resources for task {} (Reason: {}). Deferring job."

-                                   .format(job.task_id, error))

+                     self.log.info("No available resources for task %s (Reason: %s). Deferring job.",

+                                   job.task_id, error)

                      continue

                  else:

-                     self.log.info("VM {} for job {} successfully acquired".format(vm.vm_name, job.task_id))

+                     self.log.info("VM %s for job %s successfully acquired", vm.vm_name, job.task_id)

  

                  if not self.can_build_start(job):

                      self.vm_manager.release_vm(vm.vm_name)

                      continue

  

                  worker = self.start_worker(vm, job)

-                 self.log.info("Started new worker {} for job {}"

-                               .format(worker.worker_id, worker.job.task_id))

+                 self.log.info("Started new worker %s for job %s",

+                               worker.worker_id, worker.job.task_id)

  

              time.sleep(self.opts.sleeptime)

@@ -45,8 +45,8 @@ 

                  continue

              not_re_acquired_in = time.time() - float(last_release)

              if not_re_acquired_in > self.opts.build_groups[vmd.group]["vm_dirty_terminating_timeout"]:

-                 self.log.info("dirty VM `{}` not re-acquired in {}, terminating it"

-                               .format(vmd.vm_name, not_re_acquired_in))

+                 self.log.info("dirty VM `%s` not re-acquired in %s, terminating it",

+                               vmd.vm_name, not_re_acquired_in)

                  self.vmm.start_vm_termination(vmd.vm_name, allowed_pre_state=VmStates.READY)

  

      def check_vms_health(self):
@@ -73,7 +73,7 @@ 

              try:

                  self.checker.run_check_health(vmd.vm_name, vmd.vm_ip)

              except Exception as err:

-                 self.log.exception("Failed to start health check: {}".format(err))

+                 self.log.exception("Failed to start health check: %s", err)

                  if orig_state != VmStates.IN_USE:

                      vmd.store_field(self.vmm.rc, "state", orig_state)

  
@@ -142,12 +142,12 @@ 

              self.log.debug(err.msg)

              return

  

-         self.log.info("Start spawning new VM for group: {}".format(self.opts.build_groups[group]["name"]))

+         self.log.info("Start spawning new VM for group: %s", self.opts.build_groups[group]["name"])

          self.vmm.write_vm_pool_info(group, "last_vm_spawn_start", time.time())

          try:

              self.spawner.start_spawn(group)

          except Exception as error:

-             self.log.exception("Error during spawn attempt: {}".format(error))

+             self.log.exception("Error during spawn attempt: %s", error)

  

      def start_spawn_if_required(self):

          for group in self.vmm.vm_groups:
@@ -184,7 +184,7 @@ 

              try:

                  self.do_cycle()

              except Exception as err:

-                 self.log.error("Unhandled error: {}, {}".format(err, traceback.format_exc()))

+                 self.log.error("Unhandled error: %s, %s", err, traceback.format_exc())

  

      def terminate(self):

          self.kill_received = True
@@ -203,8 +203,8 @@ 

              time_elapsed = time.time() - float(vmd.get_field(self.vmm.rc, "last_health_check") or 0)

              if time_elapsed > self.opts.build_groups[vmd.group]["vm_health_check_max_time"]:

                  self.log.info("VM marked with check fail state, "

-                               "VM stayed too long in health check state, elapsed: {} VM: {}"

-                               .format(time_elapsed, str(vmd)))

+                               "VM stayed too long in health check state, elapsed: %s VM: %s",

+                               time_elapsed, str(vmd))

                  self.vmm.mark_vm_check_failed(vmd.vm_name)

  

      def terminate_again(self):
@@ -221,9 +221,9 @@ 

              if time_elapsed > self.opts.build_groups[vmd.group]["vm_terminating_timeout"]:

                  if len(self.vmm.lookup_vms_by_ip(vmd.vm_ip)) > 1:

                      self.log.info(

-                         "Removing VM record: {}. There are more VM with the same ip, "

-                         "it's safe to remove current one from VM pool".format(vmd.vm_name))

+                         "Removing VM record: %s. There are more VM with the same ip, "

+                         "it's safe to remove current one from VM pool", vmd.vm_name)

                      self.vmm.remove_vm_from_pool(vmd.vm_name)

                  else:

-                     self.log.info("Sent VM {} for termination again".format(vmd.vm_name))

+                     self.log.info("Sent VM %s for termination again", vmd.vm_name)

                      self.vmm.start_vm_termination(vmd.vm_name, allowed_pre_state=VmStates.TERMINATING)

@@ -49,7 +49,7 @@ 

              return self.opts.build_groups[self.vm.group]["name"]

          except Exception as error:

              self.log.exception("Failed to get builder group name from config, using group_id as name."

-                                "Original error: {}".format(error))

+                                "Original error: %s", error)

              return str(self.vm.group)

  

  
@@ -79,7 +79,7 @@ 

          """

          job.ended_on = time.time()

          self.return_results(job)

-         self.log.info("worker finished build: {0}".format(self.vm.vm_ip))

+         self.log.info("worker finished build: %s", self.vm.vm_ip)

          self._announce('build.end', job)

  

      def mark_running(self, job):
@@ -88,7 +88,7 @@ 

          """

          job.status = BuildStatus.RUNNING

          build = job.to_dict()

-         self.log.info("starting build: {}".format(build))

+         self.log.info("starting build: %s", build)

  

          data = {"builds": [build]}

          try:
@@ -100,11 +100,11 @@ 

          """

          Send the build results to the frontend

          """

-         self.log.info("Build {} finished with status {}"

-                       .format(job.build_id, job.status))

+         self.log.info("Build %s finished with status %s",

+                       job.build_id, job.status)

  

          if job.started_on: # unset for reattached builds

-             self.log.info("Took {} seconds.".format(job.ended_on - job.started_on))

+             self.log.info("Took %s seconds.", job.ended_on - job.started_on)

  

          data = {"builds": [job.to_dict()]}

  
@@ -167,8 +167,8 @@ 

              try:

                  os.makedirs(job.chroot_dir)

              except (OSError, IOError):

-                 self.log.exception("Could not make results dir for job: {}"

-                                    .format(job.chroot_dir))

+                 self.log.exception("Could not make results dir for job: %s",

+                                    job.chroot_dir)

                  failed = True

  

          if not self.reattach:
@@ -183,8 +183,8 @@ 

              # and run a series of checks on the package before we

              # start the build - most importantly license checks.

  

-             self.log.info("Starting build: id={} builder={} job: {}"

-                           .format(job.build_id, self.vm.vm_ip, job))

+             self.log.info("Starting build: id=%s builder=%s job: %s",

+                           job.build_id, self.vm.vm_ip, job)

  

              with local_file_logger(

                  "{}.builder.mr".format(self.name),
@@ -211,15 +211,14 @@ 

                  except MockRemoteError as e:

                      # record and break

                      self.log.exception(

-                         "Error during the build, host={}, build_id={}, chroot={}"

-                         .format(self.vm.vm_ip, job.build_id, job.chroot)

-                     )

+                         "Error during the build, host=%s, build_id=%s, chroot=%s",

+                         self.vm.vm_ip, job.build_id, job.chroot)

                      failed = True

                      mr.download_results()

  

                  except SSHConnectionError as err:

                      self.log.exception(

-                         "SSH connection stalled: {0}".format(str(err)))

+                         "SSH connection stalled: %s", str(err))

                      # The VM is unusable, don't wait for relatively slow

                      # garbage collector.

                      self.vm_manager.start_vm_termination(self.vm.vm_name)
@@ -244,9 +243,8 @@ 

                          failed = True

  

              self.log.info(

-                 "Finished build: id={} builder={} timeout={} destdir={} chroot={}"

-                 .format(job.build_id, self.vm.vm_ip, job.timeout, job.destdir,

-                         job.chroot))

+                 "Finished build: id=%s builder=%s timeout=%s destdir=%s chroot=%s",

+                 job.build_id, self.vm.vm_ip, job.timeout, job.destdir, job.chroot)

              self.copy_logs(job)

  

          register_build_result(self.opts, failed=failed)
@@ -257,8 +255,8 @@ 

                                    .format(job.build_id, job.chroot))

  

      def collect_built_packages(self, job):

-         self.log.info("Listing built binary packages in {}"

-                       .format(job.results_dir))

+         self.log.info("Listing built binary packages in %s",

+                       job.results_dir)

  

          cmd = (

              "builtin cd {0} && "
@@ -268,11 +266,11 @@ 

          )

          result = run_cmd(cmd, shell=True)

          built_packages = result.stdout.strip()

-         self.log.info("Built packages:\n{}".format(built_packages))

+         self.log.info("Built packages:\n%s", built_packages)

          return built_packages

  

      def get_srpm_url(self, job):

-         self.log.info("Retrieving srpm URL for {}".format(job.results_dir))

+         self.log.info("Retrieving srpm URL for %s", job.results_dir)

          try:

              pattern = os.path.join(job.results_dir, '*.src.rpm')

              srpm_name = os.path.basename(glob.glob(pattern)[0])
@@ -280,7 +278,7 @@ 

          except IndexError:

              srpm_url = ""

  

-         self.log.info("SRPM URL: {}".format(srpm_url))

+         self.log.info("SRPM URL: %s", srpm_url)

          return srpm_url

  

      def get_build_details(self, job):
@@ -293,7 +291,7 @@ 

                  build_details = { "srpm_url": self.get_srpm_url(job) }

              else:

                  build_details = { "built_packages": self.collect_built_packages(job) }

-             self.log.info("build details: {}".format(build_details))

+             self.log.info("build details: %s", build_details)

          except Exception as e:

              self.log.exception(str(e))

              raise CoprWorkerError("Error while collecting built packages for {}.".format(job))
@@ -302,8 +300,8 @@ 

  

      def copy_logs(self, job):

          if not os.path.isdir(job.results_dir):

-             self.log.info("Job results dir doesn't exists, couldn't copy main log; path: {}"

-                           .format(job.results_dir))

+             self.log.info("Job results dir doesn't exists, couldn't copy main log; path: %s",

+                           job.results_dir)

              return

  

          logs_to_copy = [
@@ -316,7 +314,7 @@ 

                  with open(src, "rb") as f_src, gzip.open(dst, "wb") as f_dst:

                      f_dst.writelines(f_src)

              except IOError:

-                 self.log.info("File {} not found".format(src))

+                 self.log.info("File %s not found", src)

  

      def clean_result_directory(self, job):

          """
@@ -327,8 +325,8 @@ 

  

          backup_dir_name = "prev_build_backup"

          backup_dir = os.path.join(job.results_dir, backup_dir_name)

-         self.log.info("Cleaning target directory, results from previous build storing in {}"

-                       .format(backup_dir))

+         self.log.info("Cleaning target directory, results from previous build storing in %s",

+                       backup_dir)

  

          if not os.path.exists(backup_dir):

              os.makedirs(backup_dir)
@@ -360,8 +358,8 @@ 

          try:

              self.do_job(self.job)

          except VmError as error:

-             self.log.exception("Building error: {}".format(error))

+             self.log.exception("Building error: %s", error)

          except Exception as e:

-             self.log.exception("Unexpected error: {}".format(e))

+             self.log.exception("Unexpected error: %s", e)

          finally:

              self.vm_manager.release_vm(self.vm.vm_name)

@@ -46,8 +46,8 @@ 

          rc = get_redis_connection(opts)

          rc.publish(PUBSUB_MB, json.dumps(result))

      except Exception as err:

-         log.exception("Failed to publish msg health check result: {} with error: {}"

-                       .format(result, err))

+         log.exception("Failed to publish msg health check result: %s with error: %s",

+                       result, err)

  

  

  class HealthChecker(Executor):

@@ -95,21 +95,20 @@ 

              vmd = self.vmm.get_vm_by_name(msg["vm_name"])

              check_fails_count = int(vmd.get_field(self.vmm.rc, "check_fails") or 0)

          except VmDescriptorNotFound:

-             self.log.debug("VM record disappeared, ignoring health check results,  msg: {}"

-                            .format(msg))

+             self.log.debug("VM record disappeared, ignoring health check results,  msg: %s", msg)

              return

  

          if msg["result"] == "OK":

              self.lua_scripts["on_health_check_success"](keys=[vmd.vm_key], args=[time.time()])

-             self.log.debug("recording success for ip:{} name:{}".format(vmd.vm_ip, vmd.vm_name))

+             self.log.debug("recording success for ip:%s name:%s", vmd.vm_ip, vmd.vm_name)

          else:

-             self.log.debug("recording check fail: {}".format(msg))

+             self.log.debug("recording check fail: %s", msg)

              self.lua_scripts["record_failure"](keys=[vmd.vm_key])

              fails_count = int(vmd.get_field(self.vmm.rc, "check_fails") or 0)

              max_check_fails = self.opts.build_groups[vmd.group]["vm_max_check_fails"]

              if fails_count > max_check_fails:

-                 self.log.info("check fail threshold reached: {}, terminating: {}"

-                               .format(check_fails_count, msg))

+                 self.log.info("check fail threshold reached: %s, terminating: %s",

+                               check_fails_count, msg)

                  self.vmm.start_vm_termination(vmd.vm_name)

  

      def on_vm_spawned(self, msg):
@@ -120,14 +119,14 @@ 

  

      def on_vm_termination_result(self, msg):

          if msg["result"] == "OK" and "vm_name" in msg:

-             self.log.debug("Vm terminated, removing from pool ip: {}, name: {}, msg: {}"

-                            .format(msg.get("vm_ip"), msg.get("vm_name"), msg.get("msg")))

+             self.log.debug("Vm terminated, removing from pool ip: %s, name: %s, msg: %s",

+                            msg.get("vm_ip"), msg.get("vm_name"), msg.get("msg"))

              self.vmm.remove_vm_from_pool(msg["vm_name"])

          elif "vm_name" not in msg:

-             self.log.debug("Vm termination event missing vm name, msg: {}".format(msg))

+             self.log.debug("Vm termination event missing vm name, msg: %s", msg)

          else:

-             self.log.debug("Vm termination failed ip: {}, name: {}, msg: {}"

-                            .format(msg.get("vm_ip"), msg.get("vm_name"), msg.get("msg")))

+             self.log.debug("Vm termination failed ip: %s, name: %s, msg: %s",

+                            msg.get("vm_ip"), msg.get("vm_name"), msg.get("msg"))

  

      def run(self):

          setproctitle("Event Handler")
@@ -174,4 +173,4 @@ 

                      self.handlers_map[topic](msg)

  

                  except Exception as err:

-                     self.log.exception("Handler error: raw msg: {}, {}".format(raw, err))

+                     self.log.exception("Handler error: raw msg: %s, %s", raw, err)

@@ -58,7 +58,7 @@ 

              else:

                  proc.join()

                  # self.log.debug("Child process finished: {}".format(proc.pid))

-                 self.log.debug("Child process finished: {}".format(proc))

+                 self.log.debug("Child process finished: %s", proc)

                  self.after_proc_finished(proc)

  

          self.child_processes = still_alive

@@ -160,7 +160,7 @@ 

          pipe.sadd(KEY_VM_POOL.format(group=group), vm_name)

          pipe.hmset(KEY_VM_INSTANCE.format(vm_name=vm_name), vmd.to_dict())

          pipe.execute()

-         self.log.info("registered new VM: {} {}".format(vmd.vm_name, vmd.vm_ip))

+         self.log.info("registered new VM: %s %s", vmd.vm_name, vmd.vm_ip)

  

          return vmd

  
@@ -192,13 +192,12 @@ 

          ])

  

          limit = self.opts.build_groups[group].get("max_vm_per_user")

-         self.log.debug("# vm by user: {}, limit:{} ".format(

-             vm_count_used_by_user, limit

-         ))

+         self.log.debug("# vm by user: %s, limit:%s ",

+                        vm_count_used_by_user, limit)

  

          if limit and vm_count_used_by_user >= limit:

-             self.log.debug("No VM are available, user `{}` already acquired #{} VMs"

-                            .format(username, vm_count_used_by_user))

+             self.log.debug("No VM are available, user `%s` already acquired #%s VMs",

+                            username, vm_count_used_by_user)

              return False

          else:

              return True
@@ -229,8 +228,8 @@ 

  

              user_can_acquire_more_vm = self.can_user_acquire_more_vm(ownername, group)

              if not dirtied_by_user and not user_can_acquire_more_vm:

-                 self.log.debug("User {} already acquired too much VMs in group {}"

-                                .format(ownername, group))

+                 self.log.debug("User %s already acquired too much VMs in group %s",

+                                ownername, group)

                  continue

  

              available_vms = dirtied_by_user
@@ -240,12 +239,12 @@ 

  

              for vmd in available_vms:

                  if vmd.get_field(self.rc, "check_fails") != "0":

-                     self.log.debug("VM {} has check fails, skip acquire".format(vmd.vm_name))

+                     self.log.debug("VM %s has check fails, skip acquire", vmd.vm_name)

                  vm_key = KEY_VM_INSTANCE.format(vm_name=vmd.vm_name)

                  if self.lua_scripts["acquire_vm"](keys=[vm_key, KEY_SERVER_INFO],

                                                    args=[ownername, pid, time.time(),

                                                          task_id, build_id, chroot]) == "OK":

-                     self.log.info("Acquired VM :{} {} for pid: {}".format(vmd.vm_name, vmd.vm_ip, pid))

+                     self.log.info("Acquired VM :%s %s for pid: %s", vmd.vm_name, vmd.vm_ip, pid)

                      return vmd

  

          raise NoVmAvailable("No VMs are currently available for task {} of user {}"
@@ -258,10 +257,10 @@ 

          :rtype: bool

          """

          # in_use -> ready

-         self.log.info("Releasing VM {}".format(vm_name))

+         self.log.info("Releasing VM %s", vm_name)

          vm_key = KEY_VM_INSTANCE.format(vm_name=vm_name)

          lua_result = self.lua_scripts["release_vm"](keys=[vm_key], args=[time.time()])

-         self.log.info("Release vm result `{}`".format(lua_result))

+         self.log.info("Release vm result `%s`", lua_result)

          return lua_result == "OK"

  

      def start_vm_termination(self, vm_name, allowed_pre_state=None):
@@ -281,9 +280,9 @@ 

                  "topic": EventTopics.VM_TERMINATION_REQUEST

              }

              self.rc.publish(PUBSUB_MB, json.dumps(msg))

-             self.log.info("VM {} queued for termination".format(vmd.vm_name))

+             self.log.info("VM %s queued for termination", vmd.vm_name)

          else:

-             self.log.info("VM termination `{}` skipped due to: {} ".format(vm_name, lua_result))

+             self.log.info("VM termination `%s` skipped due to: %s ", vm_name, lua_result)

  

      def remove_vm_from_pool(self, vm_name):

          """
@@ -298,7 +297,7 @@ 

          pipe.srem(KEY_VM_POOL.format(group=vmd.group), vm_name)

          pipe.delete(KEY_VM_INSTANCE.format(vm_name=vm_name))

          pipe.execute()

-         self.log.info("removed vm `{}` from pool".format(vm_name))

+         self.log.info("removed vm `%s` from pool", vm_name)

  

      def _load_multi_safe(self, vm_name_list):

          result = []
@@ -306,7 +305,7 @@ 

              try:

                  result.append(VmDescriptor.load(self.rc, vm_name))

              except VmDescriptorNotFound:

-                 self.log.debug("Failed to load VMD: {}".format(vm_name))

+                 self.log.debug("Failed to load VMD: %s", vm_name)

          return result

  

      def get_all_vm_in_group(self, group):
@@ -322,7 +321,7 @@ 

          """

          vmd_list = []

          for group in self.vm_groups:

-             if group == None:

+             if group is None:

                  continue

              vmd_list.extend(self.get_all_vm_in_group(group))

          return vmd_list

@@ -81,21 +81,21 @@ 

          spawn_result = spawn_instance(spawn_playbook, log)

          log.debug("Spawn finished")

      except CoprSpawnFailError as err:

-         log.info("Spawning a builder with pb: {}".format(err.msg))

+         log.info("Spawning a builder with pb: %s", err.msg)

          vm_ip = get_ip_from_log(err.msg)

          vm_name = get_vm_name_from_log(err.msg)

          if vm_ip and vm_name:

              # VM started but failed later during ansible run.

              try:

-                 log.exception("Trying to terminate: {}({}).".format(vm_name, vm_ip))

+                 log.exception("Trying to terminate: %s(%s).", vm_name, vm_ip)

                  terminate.terminate_vm(opts, opts.build_groups[int(group)]["terminate_playbook"], group, vm_name, vm_ip)

              except Exception:

                  # ignore all errors

                  raise

-         log.exception("Error during ansible invocation: {}".format(err.msg))

+         log.exception("Error during ansible invocation: %s", err.msg)

          return

      except Exception as err:

-         log.exception("[Unexpected] Failed to spawn builder: {}".format(err))

+         log.exception("[Unexpected] Failed to spawn builder: %s", err)

          return

  

      spawn_result["group"] = group
@@ -104,8 +104,8 @@ 

          rc = get_redis_connection(opts)

          rc.publish(PUBSUB_MB, json.dumps(spawn_result))

      except Exception as err:

-         log.exception("Failed to publish msg about new VM: {} with error: {}"

-                       .format(spawn_result, err))

+         log.exception("Failed to publish msg about new VM: %s with error: %s",

+                       spawn_result, err)

  

  

  class Spawner(Executor):

@@ -32,7 +32,7 @@ 

      }

      start_time = time.time()

      try:

-         log.info("starting terminate vm with args: {}".format(term_args))

+         log.info("starting terminate vm with args: %s", term_args)

          run_ansible_playbook_cli(args, "terminate instance", log)

          result["result"] = "OK"

      except Exception as error:
@@ -42,11 +42,11 @@ 

          log.exception(msg)

  

      try:

-         log.info("VM terminated {}, time elapsed: {} ".format(term_args, time.time() - start_time))

+         log.info("VM terminated %s, time elapsed: %s ", term_args, time.time() - start_time)

          rc = get_redis_connection(opts)

          rc.publish(PUBSUB_MB, json.dumps(result))

      except Exception as error:

-         log.exception("Failed to publish msg about new VM: {} with error: {}".format(result, error))

+         log.exception("Failed to publish msg about new VM: %s with error: %s", result, error)

  

  

  class Terminator(Executor):
@@ -71,6 +71,6 @@ 

              msg = "Termination playbook {} is missing".format(terminate_playbook)

              raise CoprSpawnFailError(msg)

  

-         self.log.info("received VM ip: {}, name: {} for termination".format(vm_ip, vm_name))

+         self.log.info("received VM ip: %s, name: %s for termination", vm_ip, vm_name)

  

          self.run_detached(terminate_vm, args=(self.opts, terminate_playbook, group, vm_name, vm_ip))

Examples:
W: 73,26: Use % formatting in logging functions and pass the % parameters as arguments (logging-format-interpolation)

C:324,15: Comparison to None should be 'expr is None' (singleton-comparison)
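The singleton-comparison fix is the one-line change near the end of the diff (`group == None` to `group is None`). A minimal sketch of what pylint objects to, using throwaway names:

    group = None

    if group == None:   # flagged: equality can be redefined by a custom __eq__
        pass

    if group is None:   # preferred: identity test against the None singleton
        pass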

Reason for changing the formatting in logging functions:
https://stackoverflow.com/a/34634301
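The pattern applied throughout the diff, condensed into a standalone sketch (the logger and variable names here are made up for illustration):

    import logging

    log = logging.getLogger("copr.backend.example")
    username, projectname, chroot = "user", "proj", "fedora-rawhide-x86_64"

    # eager: the message string is built before the logging call is evaluated
    log.info("Creating repo for: {}/{}/{}".format(username, projectname, chroot))

    # lazy: logging interpolates with % only if the record is actually emitted
    log.info("Creating repo for: %s/%s/%s", username, projectname, chroot)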

It really seems to be a good idea to delay the string interpolation ... +1.

This only makes a practical difference for the debug calls, which may be thrown away if the logging level is higher than 'debug', which is probably the case for the backend logging setup. But I think it's good to use the interpolation instruments built directly into the log.* methods, so +1.
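A rough way to see that difference, assuming a logger configured at INFO so debug records are dropped (the Expensive class exists only for this illustration):

    import logging

    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger("copr.backend.example")

    class Expensive:
        def __str__(self):
            print("formatted!")       # side effect reveals when formatting happens
            return "expensive value"

    log.debug("value: {}".format(Expensive()))  # prints "formatted!" even though the record is discarded
    log.debug("value: %s", Expensive())         # prints nothing; str() is never called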

Pull-Request has been merged by frostyx

5 years ago