#274 Use the flufl.lock instead of threading lock in KojiTagCache.
Merged 4 years ago by jkaluza. Opened 4 years ago by jkaluza.
jkaluza/odcs mock-runroot  into  master

file modified
+40 -25
@@ -24,7 +24,8 @@ 

  import time

  import os

  import shutil

- import threading

+ from datetime import timedelta

+ from flufl.lock import Lock

  

  from odcs.common.types import PungiSourceType, COMPOSE_STATES

  from odcs.server import conf, log
@@ -43,14 +44,9 @@ 

      ODCS compose attributes which influences the repodata.

      """

  

-     # This class is thread-safe and only single backend thread can change the

-     # content of koji_tag_cache in the same time. This lock guards that.

-     _cache_lock = threading.Lock()

- 

      def __init__(self):

          self.cache_dir = os.path.join(conf.target_dir, "koji_tag_cache")

-         with KojiTagCache._cache_lock:

There is no need to guard the creation of the top-level cache_dir. If there is a race between two processes creating it, makedirs will simply return without any error, because EEXIST is caught there.

-             makedirs(self.cache_dir)

+         makedirs(self.cache_dir)

  

      def remove_old_koji_tag_cache_data(self):

          """
@@ -114,11 +110,10 @@ 

          :return: Returns True when there exists cached compose for input

              compose.

          """

-         with KojiTagCache._cache_lock:

There is no need to guard here; either the directory exists or it does not. If another process is in the middle of creating it and holds the directory lock, we will wait on that lock later when we try to use the directory.

-             if compose.source_type != PungiSourceType.KOJI_TAG:

-                 return False

+         if compose.source_type != PungiSourceType.KOJI_TAG:

+             return False

  

-             return os.path.exists(self.cached_compose_dir(compose))

+         return os.path.exists(self.cached_compose_dir(compose))

  

      def reuse_cached(self, compose):

          """
@@ -128,10 +123,18 @@ 

          :param models.Compose compose: Compose which will reuse the older

              cached compose.

          """

-         with KojiTagCache._cache_lock:

-             cached_compose_dir = self.cached_compose_dir(compose)

-             log.info("Reusing repodata from old cached compose %s",

-                      cached_compose_dir)

+ 

+         cached_compose_dir = self.cached_compose_dir(compose)

+         log.info("Reusing repodata from old cached compose %s",

+                  cached_compose_dir)

+ 

+         # Create the lock. The rmtree and copytree on same fs should not take more

+         # than 3 minutes really.

+         lock = Lock(cached_compose_dir + ".lock")

+         lock.lifetime = timedelta(minutes=3)

+ 

+         try:

+             lock.lock()

  

              compose_dir_name = "odcs-%d-1-19700101.n.0" % compose.id

              compose_dir = os.path.join(self.cache_dir, compose_dir_name)
@@ -139,6 +142,9 @@ 

              if os.path.exists(compose_dir):

                  shutil.rmtree(compose_dir)

              shutil.copytree(cached_compose_dir, compose_dir, symlinks=True)

+         finally:

+             if lock.is_locked:

+                 lock.unlock()

  

      def cleanup_reused(self, compose):

          """
@@ -158,16 +164,25 @@ 

  

          :param models.Compose compose: Compose to update the cache from.

          """

-         with KojiTagCache._cache_lock:

-             if (compose.source_type != PungiSourceType.KOJI_TAG or

-                     compose.state != COMPOSE_STATES["done"]):

-                 log.info("Not caching the compose %s.", compose)

-                 return

- 

-             log.info("Caching the compose %s", compose)

-             cached_compose_dir = self.cached_compose_dir(compose)

-             compose_dir = os.path.realpath(compose.toplevel_dir)

- 

+         if (compose.source_type != PungiSourceType.KOJI_TAG or

+                 compose.state != COMPOSE_STATES["done"]):

+             log.info("Not caching the compose %s.", compose)

+             return

+ 

+         log.info("Caching the compose %s", compose)

+         cached_compose_dir = self.cached_compose_dir(compose)

+         compose_dir = os.path.realpath(compose.toplevel_dir)

+ 

+         # Create the lock. The rmtree and copytree on same fs should not take more

+         # than 3 minutes really.

+         lock = Lock(cached_compose_dir + ".lock")

+         lock.lifetime = timedelta(minutes=3)

+ 

+         try:

+             lock.lock()

              if os.path.exists(cached_compose_dir):

                  shutil.rmtree(cached_compose_dir)

              shutil.copytree(compose_dir, cached_compose_dir, symlinks=True)

+         finally:

+             if lock.is_locked:

+                 lock.unlock()

We already use flufl.lock in mergerepo.py to lock NFS directories. This change applies the same mechanism to KojiTagCache, guarding access to the cached directory in case multiple processes try to access it at the same time.

There is no need to guard the creation of the top-level cache_dir. If there is a race between two processes creating it, makedirs will simply return without any error, because EEXIST is caught there.

There is no need to guard here; either the directory exists or it does not. If another process is in the middle of creating it and holds the directory lock, we will wait on that lock later when we try to use the directory.

Pull-Request has been merged by jkaluza

4 years ago