#1194 backend: more deterministic test_delete_not_finished_workers
Merged 4 years ago by praiskup. Opened 4 years ago by praiskup.

@@ -160,10 +160,16 @@

          """

          Process the task (priority) queue.

          """

+         now = None

          start_time = time.time()

  

-         while time.time() - start_time < timeout:

-             self._cleanup_workers()

+         while True:

+             now = start_time if now is None else time.time()

+ 

+             if not now - start_time < timeout:

+                 break

+ 

+             self._cleanup_workers(now)

  

              worker_count = len(self.worker_ids())

              if worker_count >= self.max_workers:
@@ -183,11 +189,11 @@

                  # to do.  Just simply wait till the end of the cycle.

                  break

  

-             self._start_worker(task)

+             self._start_worker(task, now)

  

-     def _start_worker(self, task):

+     def _start_worker(self, task, time_now):

          worker_id = self.get_worker_id(repr(task))

-         self.redis.hset(worker_id, 'allocated', time.time())

+         self.redis.hset(worker_id, 'allocated', time_now)

          self.log.info("Starting worker %s", worker_id)

          self.start_task(worker_id, task)

  
@@ -195,9 +201,7 @@

          'remove all tasks from queue'

          self.tasks = JobQueue()

  

-     def _cleanup_workers(self):

-         now = time.time()

- 

+     def _cleanup_workers(self, now):

          for worker_id in self.worker_ids():

              info = self.redis.hgetall(worker_id)

  

@@ -130,7 +130,7 @@

      def test_worker_starts(self):

          task = self.worker_manager.tasks.pop_task()

          assert task.id == 0

-         self.worker_manager._start_worker(task)

+         self.worker_manager._start_worker(task, time.time())

          worker_id = self.worker_manager.get_worker_id(repr(task))

          assert len(self.redis.keys(worker_id)) == 1

  
@@ -172,23 +172,30 @@

                  return params

          return params

  

-     def test_delete_not_finished_workers(self):

+     @patch('backend.worker_manager.time.time')

+     def test_delete_not_finished_workers(self, mc_time):

          self.worker_manager.environ = {'FAIL_STARTED': '1'}

          self.worker_manager.worker_timeout_deadcheck = 0.4

  

-         # start toy:0

-         self.worker_manager.run(timeout=0.0001)

+         # each time.time() call incremented by 1

+         mc_time.side_effect = range(1000)

+ 

+         # first loop just starts the toy:0 worker

+         with patch('backend.worker_manager.time.sleep'):

+             self.worker_manager.run(timeout=1)

  

          params = self.wait_field(self.w0, 'started')

          assert self.w0 in self.workers()

          assert 'started' in params

  

          # toy 0 is marked for deleting

-         self.worker_manager.run(timeout=0.0001)

+         with patch('backend.worker_manager.time.sleep'):

+             self.worker_manager.run(timeout=1)

          assert 'delete' in self.redis.hgetall(self.w0)

  

          # toy 0 should be deleted

-         self.worker_manager.run(timeout=0.0001)

+         with patch('backend.worker_manager.time.sleep'):

+             self.worker_manager.run(timeout=1)

          keys = self.workers()

          assert self.w1 in keys

          assert self.w0 not in keys

backend: more deterministic test_delete_not_finished_workers

Previously we relied on the fact that the background action processor is
able to start (== fork() and communicate with redis) within a very
limited time period, and that period often wasn't long enough.

Instead of increasing the period, make sure that we control the
time.time() call in worker-manager so we know exactly how many loops
happen in WorkerManager.run().  This requires us to limit the number of
calls to time.time().

rebased onto 2a817f853d9d7b90939fd9475f16fceba3801347

4 years ago

Metadata Update from @praiskup:
- Request assigned

4 years ago

rebased onto 4d0b797

4 years ago

Pull-Request has been merged by praiskup

4 years ago