#804 detect submitter from webhook builds
Merged 4 years ago by frostyx. Opened 4 years ago by praiskup.
Branch submitter-detection into master (unknown source repository)

@@ -173,8 +173,10 @@

                  try:

                      vm_group_ids = self.get_vm_group_ids(job.arch)

                      self.log.info("Picking VM from groups %s for job %s", vm_group_ids, job)

-                     vm = self.vm_manager.acquire_vm(vm_group_ids, job.project_owner, self.next_worker_id,

-                                                     job.task_id, job.build_id, job.chroot)

+                     vm = self.vm_manager.acquire_vm(

+                         vm_group_ids, job.project_owner, job.sandbox,

+                         self.next_worker_id, job.task_id, job.build_id,

+                         job.chroot)

                  except NoVmAvailable as error:

                      self.log.info("No available resources for task %s (Reason: %s). Deferring job.",

                                    job.task_id, error)

@@ -37,6 +37,7 @@

  # ARGV[4]: task_id

  # ARGV[5]: build_id

  # ARGV[6]: chroot

+ # ARGV[7]: sandbox

  acquire_vm_lua = """

  local old_state = redis.call("HGET", KEYS[1], "state")

  if old_state ~= "ready"  then
@@ -47,7 +48,8 @@

      if last_health_check and server_restart_time and last_health_check > server_restart_time  then

          redis.call("HMSET", KEYS[1], "state", "in_use", "bound_to_user", ARGV[1],

                     "used_by_worker", ARGV[2], "in_use_since", ARGV[3],

-                    "task_id",  ARGV[4], "build_id", ARGV[5], "chroot", ARGV[6])

+                    "task_id",  ARGV[4], "build_id", ARGV[5], "chroot", ARGV[6],

+                    "sandbox", ARGV[7])

          return "OK"

      else

          return nil
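
For context, a minimal sketch (not part of the patch) of how such a script can be registered and invoked with redis-py; key names and argument values are invented, only the ARGV layout (with the new sandbox slot in ARGV[7]) follows the script above:

    import time
    import redis

    rc = redis.StrictRedis(decode_responses=True)
    acquire_vm_lua = ...  # the Lua source shown above
    acquire_vm = rc.register_script(acquire_vm_lua)

    result = acquire_vm(
        # KEYS[1] = VM hash, KEYS[2] = server-info hash (names invented)
        keys=["vm_instance::builder-01", "server_info"],
        args=["praiskup",               # ARGV[1] bound_to_user
              4242,                     # ARGV[2] worker id
              time.time(),              # ARGV[3] in_use_since
              "task-1",                 # ARGV[4] task_id
              "1",                      # ARGV[5] build_id
              "fedora-rawhide-x86_64",  # ARGV[6] chroot
              "praiskup/foo--praiskup"])  # ARGV[7] sandbox
    if result == "OK":
        print("VM acquired")
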
@@ -207,16 +209,16 @@

          vmd_list = self.get_all_vm_in_group(group)

          return [vmd for vmd in vmd_list if vmd.state == VmStates.READY]

  

-     def get_dirty_vms(self, group):

-         vmd_list = self.get_all_vm_in_group(group)

-         return [vmd for vmd in vmd_list if vmd.bound_to_user is not None]

- 

-     def acquire_vm(self, groups, ownername, pid, task_id="None", build_id="None", chroot="None"):

+     def acquire_vm(self, groups, ownername, sandbox, pid, task_id="None", build_id="None", chroot="None"):

          """

          Try to acquire VM from pool.

  

          :param list groups: builder group ids where the build can be launched as defined in config

-         :param ownername: job owner, prefer to reuse an existing VM which was used by that same user before

+         :param ownername: the owner name (user or group) the build is

+                 accounted to; there is currently a limit on the number of

+                 concurrent jobs accounted to a single owner

+         :param sandbox: sandbox ID required by the build; we prefer to reuse

+                 existing VMs previously used for the same sandbox

          :param pid: worker process id

  

          :rtype: VmDescriptor
@@ -224,27 +226,33 @@

          """

          for group in groups:

              ready_vmd_list = self.get_ready_vms(group)

-             # trying to find VM used by this user

-             dirtied_by_user = [vmd for vmd in ready_vmd_list if vmd.bound_to_user == ownername]

+             # trying to find VM used by the same owner for the same sandbox

+             dirtied = [vmd for vmd in ready_vmd_list

+                        if vmd.sandbox == sandbox and

+                           vmd.bound_to_user == ownername]

  

              user_can_acquire_more_vm = self.can_user_acquire_more_vm(ownername, group)

-             if not dirtied_by_user and not user_can_acquire_more_vm:

+             if not dirtied and not user_can_acquire_more_vm:

                  self.log.debug("User %s already acquired too much VMs in group %s",

                                 ownername, group)

                  continue

  

-             available_vms = dirtied_by_user

+             available_vms = dirtied

              if user_can_acquire_more_vm:

                  clean_list = [vmd for vmd in ready_vmd_list if vmd.bound_to_user is None]

                  available_vms += clean_list

  

              for vmd in available_vms:

-                 if vmd.get_field(self.rc, "check_fails") != "0":

+                 check_fails = vmd.get_field(self.rc, "check_fails")

+                 if check_fails and check_fails != "0":

                      self.log.debug("VM %s has check fails, skip acquire", vmd.vm_name)

+                     continue

+ 

                  vm_key = KEY_VM_INSTANCE.format(vm_name=vmd.vm_name)

                  if self.lua_scripts["acquire_vm"](keys=[vm_key, KEY_SERVER_INFO],

                                                    args=[ownername, pid, time.time(),

-                                                         task_id, build_id, chroot]) == "OK":

+                                                         task_id, build_id,

+                                                         chroot, sandbox]) == "OK":

                      self.log.info("Acquired VM :%s %s for pid: %s", vmd.vm_name, vmd.vm_ip, pid)

                      return vmd
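
Restated outside the diff, the selection policy above boils down to this self-contained sketch (hypothetical names, ignoring the check_fails filter and the atomic Lua acquire):

    def pick_vm(ready_vms, ownername, sandbox, can_acquire_more):
        """Prefer a VM already dirtied by the same owner for the same
        sandbox; fall back to clean VMs only while the owner is still
        below the per-owner limit."""
        dirtied = [vm for vm in ready_vms
                   if vm.sandbox == sandbox and vm.bound_to_user == ownername]
        clean = [vm for vm in ready_vms if vm.bound_to_user is None]
        candidates = dirtied + (clean if can_acquire_more else [])
        return candidates[0] if candidates else None
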

  
@@ -371,7 +379,7 @@

              "in_use_since",

          }

  

-         headers = ['VM Name', 'IP', 'State', 'Health Check', 'User', 'Task info']

+         headers = ['VM Name', 'IP', 'State', 'Health Check', 'Bound to', 'Task info']

  

          def date_to_str(value):

              if value is None:
@@ -398,7 +406,6 @@

                  row.append(vm.vm_name)

                  row.append(vm.vm_ip)

                  row.append(vm.state)

-                 written = ['vm_name', 'vm_ip', 'state']

                  vmd = vm.to_dict()

  

                  row.append("fails: {0}\nlast: {1}".format(
@@ -406,7 +413,18 @@

                      date_to_str(vmd.get('last_health_check', None))

                  ))

  

-                 row.append(vmd.get('bound_to_user', ''))

+                 bound_lines = []

+                 user = vmd.get('bound_to_user', '')

+                 sandbox = vmd.get('sandbox', '')

+                 builds_count = vmd.get('builds_count', '')

+                 if user:

+                     bound_lines.append('user: {0}'.format(user))

+                 if sandbox:

+                     bound_lines.append('sandbox: {0}'.format(sandbox))

+                 if builds_count:

+                     bound_lines.append('builds_count: {0}'.format(builds_count))

+ 

+                 row.append('\n'.join(bound_lines))

  

                  task_info = ''

                  if vmd.get('task_id', None):

@@ -15,6 +15,7 @@

          self.bound_to_user = None

          self.used_by_worker = None

          self.task_id = None

+         self.sandbox = None

  

      @property

      def vm_key(self):

@@ -35,6 +35,9 @@

  GID1 = 0

  GID2 = 1

  

+ # some sandbox string, for tests where we don't care about its value

+ SANDBOX = 'sandbox'

+ 

  class TestManager(object):

  

      def setup_method(self, method):
@@ -130,28 +133,28 @@

          # undefined both last_health_check and server_start_timestamp

          mc_time.time.return_value = 0.1

          with pytest.raises(NoVmAvailable):

-             self.vmm.acquire_vm([GID1], self.ownername, 42)

+             self.vmm.acquire_vm([GID1], self.ownername, SANDBOX, 42)

  

          # only server start timestamp is defined

          mc_time.time.return_value = 1

          self.vmm.mark_server_start()

          with pytest.raises(NoVmAvailable):

+             self.vmm.acquire_vm([GID1], self.ownername, SANDBOX, 42)

+             self.vmm.acquire_vm([GID1], self.ownername, 42, SANDBOX)

  

          # only last_health_check defined

          self.rc.delete(KEY_SERVER_INFO)

          vmd.store_field(self.rc, "last_health_check", 0)

          with pytest.raises(NoVmAvailable):

+             self.vmm.acquire_vm([GID1], self.ownername, SANDBOX, 42)

+             self.vmm.acquire_vm([GID1], self.ownername, 42, SANDBOX)

  

          # both defined but last_health_check < server_start_time

          self.vmm.mark_server_start()

          with pytest.raises(NoVmAvailable):

+             self.vmm.acquire_vm([GID1], self.ownername, SANDBOX, 42)

+             self.vmm.acquire_vm([GID1], self.ownername, 42, SANDBOX)

  

          # and finally last_health_check > server_start_time

          vmd.store_field(self.rc, "last_health_check", 2)

-         vmd_res = self.vmm.acquire_vm([GID1], self.ownername, 42)

+         vmd_res = self.vmm.acquire_vm([GID1], self.ownername, SANDBOX, 42)

          assert vmd.vm_name == vmd_res.vm_name

  

      def test_acquire_vm_extra_kwargs(self, mc_time):
@@ -166,7 +169,8 @@

              "build_id": "20",

              "chroot": "fedora-20-x86_64"

          }

-         vmd_got = self.vmm.acquire_vm([GID1], self.ownername, self.pid, **kwargs)

+         vmd_got = self.vmm.acquire_vm([GID1], self.ownername, SANDBOX,

+                                       self.pid, **kwargs)

          for k, v in kwargs.items():

              assert vmd_got.get_field(self.rc, k) == v

  
@@ -177,9 +181,28 @@

          vmd.store_field(self.rc, "state", VmStates.READY)

          vmd.store_field(self.rc, "last_health_check", 2)

          vmd.store_field(self.vmm.rc, "bound_to_user", "foo")

+         vmd.store_field(self.vmm.rc, "sandbox", SANDBOX)

+         with pytest.raises(NoVmAvailable):

+             self.vmm.acquire_vm(groups=[GID1], ownername=self.ownername,

+                     pid=self.pid, sandbox=SANDBOX)

+         vm = self.vmm.acquire_vm(groups=[GID1], ownername="foo", pid=self.pid,

+                                  sandbox=SANDBOX)

+         assert vm.vm_name == self.vm_name

+ 

+     def test_different_sandbox_cannot_acquire_vm(self, mc_time):

+         mc_time.time.return_value = 0

+         self.vmm.mark_server_start()

+         vmd = self.vmm.add_vm_to_pool(self.vm_ip, self.vm_name, GID1)

+         vmd.store_field(self.rc, "state", VmStates.READY)

+         vmd.store_field(self.rc, "last_health_check", 2)

+         vmd.store_field(self.vmm.rc, "bound_to_user", "foo")

+         vmd.store_field(self.vmm.rc, "sandbox", "sandboxA")

+ 

          with pytest.raises(NoVmAvailable):

-             self.vmm.acquire_vm(groups=[GID1], ownername=self.ownername, pid=self.pid)

-         vm = self.vmm.acquire_vm(groups=[GID1], ownername="foo", pid=self.pid)

+             self.vmm.acquire_vm(groups=[GID1], ownername="foo",

+                     pid=self.pid, sandbox="sandboxB")

+         vm = self.vmm.acquire_vm(groups=[GID1], ownername="foo", pid=self.pid,

+                                  sandbox="sandboxA")

          assert vm.vm_name == self.vm_name

  

      def test_acquire_vm(self, mc_time):
@@ -199,16 +222,22 @@

          vmd2.store_field(self.rc, "last_health_check", 2)

  

          vmd_alt.store_field(self.vmm.rc, "bound_to_user", self.ownername)

+         vmd_alt.store_field(self.vmm.rc, "sandbox", SANDBOX)

  

-         vmd_got_first = self.vmm.acquire_vm([GID1, GID2], ownername=self.ownername, pid=self.pid)

+         vmd_got_first = self.vmm.acquire_vm([GID1, GID2],

+                 ownername=self.ownername, pid=self.pid, sandbox=SANDBOX)

          assert vmd_got_first.vm_name == "vm_alt"

-         vmd_got_second = self.vmm.acquire_vm([GID1, GID2], ownername=self.ownername, pid=self.pid)

+ 

+         vmd_got_second = self.vmm.acquire_vm([GID1, GID2],

+                 ownername=self.ownername, pid=self.pid, sandbox=SANDBOX)

          assert vmd_got_second.vm_name == self.vm_name

  

          with pytest.raises(NoVmAvailable):

-             self.vmm.acquire_vm(groups=[GID1], ownername=self.ownername, pid=self.pid)

+             self.vmm.acquire_vm(groups=[GID1], ownername=self.ownername,

+                     pid=self.pid, sandbox=SANDBOX)

  

-         vmd_got_third = self.vmm.acquire_vm(groups=[GID1, GID2], ownername=self.ownername, pid=self.pid)

+         vmd_got_third = self.vmm.acquire_vm(groups=[GID1, GID2],

+                 ownername=self.ownername, pid=self.pid, sandbox=SANDBOX)

          assert vmd_got_third.vm_name == self.vm2_name

  

      def test_acquire_vm_per_user_limit(self, mc_time):
@@ -224,10 +253,10 @@

              vmd_list.append(vmd)

  

          for idx in range(max_vm_per_user):

-             self.vmm.acquire_vm([GID1], self.ownername, idx)

+             self.vmm.acquire_vm([GID1], self.ownername, SANDBOX, idx)

  

          with pytest.raises(NoVmAvailable):

-             self.vmm.acquire_vm([GID1], self.ownername, 42)

+             self.vmm.acquire_vm([GID1], self.ownername, SANDBOX, 42)

  

      def test_acquire_only_ready_state(self, mc_time):

          mc_time.time.return_value = 0
@@ -240,7 +269,8 @@

                        VmStates.TERMINATING, VmStates.CHECK_HEALTH_FAILED]:

              vmd_main.store_field(self.rc, "state", state)

              with pytest.raises(NoVmAvailable):

-                 self.vmm.acquire_vm(groups=[GID1], ownername=self.ownername, pid=self.pid)

+                 self.vmm.acquire_vm(groups=[GID1], ownername=self.ownername,

+                                     pid=self.pid, sandbox=SANDBOX)

  

      def test_acquire_and_release_vm(self, mc_time):

          mc_time.time.return_value = 0
@@ -252,17 +282,24 @@

          vmd_main.store_field(self.rc, "state", VmStates.READY)

          vmd_alt.store_field(self.rc, "state", VmStates.READY)

          vmd_alt.store_field(self.vmm.rc, "bound_to_user", self.ownername)

+         vmd_alt.store_field(self.vmm.rc, "sandbox", SANDBOX)

          vmd_main.store_field(self.rc, "last_health_check", 2)

          vmd_alt.store_field(self.rc, "last_health_check", 2)

  

-         vmd_got_first = self.vmm.acquire_vm(groups=[GID1], ownername=self.ownername, pid=self.pid)

+         vmd_got_first = self.vmm.acquire_vm(

+             groups=[GID1], ownername=self.ownername, pid=self.pid,

+             sandbox=SANDBOX)

          assert vmd_got_first.vm_name == "vm_alt"

  

          self.vmm.release_vm("vm_alt")

-         vmd_got_again = self.vmm.acquire_vm(groups=[GID1], ownername=self.ownername, pid=self.pid)

+         vmd_got_again = self.vmm.acquire_vm(

+             groups=[GID1], ownername=self.ownername, pid=self.pid,

+             sandbox=SANDBOX)

          assert vmd_got_again.vm_name == "vm_alt"

  

-         vmd_got_another = self.vmm.acquire_vm(groups=[GID1], ownername=self.ownername, pid=self.pid)

+         vmd_got_another = self.vmm.acquire_vm(

+             groups=[GID1], ownername=self.ownername, pid=self.pid,

+             sandbox=SANDBOX)

          assert vmd_got_another.vm_name == self.vm_name

  

      def test_release_only_in_use(self):

@@ -0,0 +1,20 @@

+ """

+ add build.submitted_by field

+ 

+ Revision ID: 1f4e04bb3618

+ Revises: 2d8b4722918b

+ Create Date: 2019-06-10 06:08:14.501770

+ """

+ 

+ import sqlalchemy as sa

+ from alembic import op

+ 

+ 

+ revision = '1f4e04bb3618'

+ down_revision = '2d8b4722918b'

+ 

+ def upgrade():

+     op.add_column('build', sa.Column('submitted_by', sa.Text(), nullable=True))

+ 

+ def downgrade():

+     op.drop_column('build', 'submitted_by')
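
As a side note, the migration can be applied with the alembic CLI ("alembic upgrade head") or programmatically; a minimal sketch, assuming a standard alembic.ini in the working directory:

    from alembic import command
    from alembic.config import Config

    config = Config("alembic.ini")           # path is an assumption
    command.upgrade(config, "1f4e04bb3618")  # or simply "head"
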

@@ -763,7 +763,8 @@

  

      @classmethod

      def rebuild_package(cls, package, source_dict_update={}, copr_dir=None, update_callback=None,

-                         scm_object_type=None, scm_object_id=None, scm_object_url=None):

+                         scm_object_type=None, scm_object_id=None,

+                         scm_object_url=None, submitted_by=None):

  

          source_dict = package.source_json_dict

          source_dict.update(source_dict_update)
@@ -789,6 +790,7 @@

              scm_object_type=scm_object_type,

              scm_object_id=scm_object_id,

              scm_object_url=scm_object_url,

+             submitted_by=submitted_by,

          )

          db.session.add(build)

  

@@ -13,6 +13,8 @@

  from libravatar import libravatar_url

  import zlib

  

+ from flask import url_for

+ 

  from copr_common.enums import ActionTypeEnum, BackendResultEnum, FailTypeEnum, ModuleStatusEnum, StatusEnum

  from coprs import constants

  from coprs import db
@@ -832,6 +834,9 @@

      # method to call on build state change

      update_callback = db.Column(db.Text)

  

+     # used by webhook builds; e.g. github.com:praiskup, or pagure.io:jdoe

+     submitted_by = db.Column(db.Text)

+ 

      @property

      def user_name(self):

          return self.user.name
@@ -1062,6 +1067,41 @@

  

          return result

  

+     @property

+     def submitter(self):

+         """

+         Return a (submitter_string, submitter_link) tuple; submitter_link

+         may be None if we are not able to detect it reliably.

+         """

+         if self.user:

+             user = self.user.name

+             return (user, url_for('coprs_ns.coprs_by_user', username=user))

+ 

+         if self.submitted_by:

+             links = ['http://', 'https://']

+             if any([self.submitted_by.startswith(x) for x in links]):

+                 return (self.submitted_by, self.submitted_by)

+ 

+             return (self.submitted_by, None)

+ 

+         return (None, None)

+ 

+     @property

+     def sandbox(self):

+         """

+         Return a string unique to the project + submitter combination.  Copr

+         backend applies its builder VM separation policy at this level (VMs

+         are only re-used for builds that have the same build.sandbox value).

+         """

+         submitter, _ = self.submitter

+         if not submitter:

+             # If we don't know the build submitter, use a "random" value to

+             # keep the build separated from any other.

+             submitter = uuid.uuid4()

+ 

+         return '{0}--{1}'.format(self.copr.full_name, submitter)

+ 

  

  class DistGitBranch(db.Model, helpers.Serializer):

      """

@@ -106,9 +106,11 @@

            </dd>

            <dt> Built by: </dt>

            <dd>

-             <a href="{{ url_for('coprs_ns.coprs_by_user', username=build.user.name) }}">

-               {{ build.user.name }}

-             </a>

+             {% if build.submitter[1] %}

+             <a href="{{ build.submitter[1] }}">{{ build.submitter[0] }}</a>

+             {% else %}

+             {{ build.submitter[0] or "unknown user" }}

+             {% endif %}

            </dd>

          </dl>

        </div>

@@ -100,7 +100,8 @@

              "project_owner": task.build.copr.owner_name,

              "project_name": task.build.copr_name,

              "project_dirname": task.build.copr_dirname,

-             "submitter": task.build.user.name if task.build.user else None, # there is no user for webhook builds

+             "submitter": task.build.submitter[0],

+             "sandbox": task.build.sandbox,

              "chroot": task.mock_chroot.name,

              "repos": task.build.repos,

              "memory_reqs": task.build.memory_reqs,
@@ -144,6 +145,8 @@

              "project_owner": task.copr.owner_name,

              "project_name": task.copr_name,

              "project_dirname": task.copr_dirname,

+             "submitter": task.submitter[0],

+             "sandbox": task.sandbox,

              "source_type": task.source_type,

              "source_json": task.source_json,

              "chroot": chroot,

@@ -92,6 +92,11 @@

          commits = []

          ref_type = payload['push']['changes'][0]['new']['type']

          ref = payload['push']['changes'][0]['new']['name']

+         try:

+             actor = payload['actor']['links']['html']['href']

+         except KeyError:

+             actor = None

+ 

          if ref_type == 'tag':

              committish = ref

          else:
@@ -104,7 +109,8 @@

      )

  

      for package in packages:

-         BuildsLogic.rebuild_package(package, {'committish': committish})

+         BuildsLogic.rebuild_package(package, {'committish': committish},

+                                     submitted_by=actor)

  

      db.session.commit()
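
For reference, a minimal sketch of the payload shape the Bitbucket handler reads; the values are invented, only the key path matches the code above:

    payload = {
        'actor': {'links': {'html': {'href': 'https://bitbucket.org/jdoe/'}}},
        'push': {'changes': [{'new': {'type': 'branch', 'name': 'master'}}]},
    }
    try:
        actor = payload['actor']['links']['html']['href']
    except KeyError:
        actor = None  # submitter stays unknown, the build gets a unique sandbox
    # actor -> 'https://bitbucket.org/jdoe/'
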

  
@@ -139,6 +145,10 @@

  

          ref_type = payload.get('ref_type', '')

          ref = payload.get('ref', '')

+         try:

+             sender = payload['sender']['url']

+         except KeyError:

+             sender = None

      except KeyError:

          return "Bad Request", 400

  
@@ -146,7 +156,8 @@

  

      committish = (ref if ref_type == 'tag' else payload.get('after', ''))

      for package in packages:

-         BuildsLogic.rebuild_package(package, {'committish': committish})

+         BuildsLogic.rebuild_package(package, {'committish': committish},

+                                     submitted_by=sender)

  

      db.session.commit()

  
@@ -182,6 +193,12 @@

          else:

              ref_type = None

              ref = payload.get('ref', '')

+ 

+         try:

+             submitter = 'gitlab.com:{}'.format(str(payload["user_username"]))

+         except KeyError:

+             submitter = None

+ 

      except KeyError:

          return "Bad Request", 400

  
@@ -189,7 +206,8 @@

  

      committish = (ref if ref_type == 'tag' else payload.get('after', ''))

      for package in packages:

-         BuildsLogic.rebuild_package(package, {'committish': committish})

+         BuildsLogic.rebuild_package(package, {'committish': committish},

+                                     submitted_by=submitter)

  

      db.session.commit()
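
Taken together, the hooks derive the submitter from forge-specific payload fields; a hedged summary with invented sample payloads:

    github_payload = {'sender': {'url': 'https://api.github.com/users/jdoe'}}
    gitlab_payload = {'user_username': 'jdoe'}

    # GitHub: API URL of the event sender
    sender = github_payload['sender']['url']
    # GitLab: plain user name, prefixed so the origin stays obvious
    submitter = 'gitlab.com:{}'.format(gitlab_payload['user_username'])
    # Bitbucket: HTML profile link of the actor (see the sketch above)
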

  

@@ -82,7 +82,7 @@

          self.copr = self.package.copr

  

      def build(self, source_dict_update, copr_dir, update_callback,

-               scm_object_type, scm_object_id, scm_object_url):

+               scm_object_type, scm_object_id, scm_object_url, agent_url):

  

          if self.package.copr_dir.name != copr_dir.name:

              package = PackagesLogic.get_or_create(copr_dir, self.package.name, self.package)
@@ -92,7 +92,7 @@

          db.session.execute('LOCK TABLE build IN EXCLUSIVE MODE')

          return BuildsLogic.rebuild_package(

              package, source_dict_update, copr_dir, update_callback,

-             scm_object_type, scm_object_id, scm_object_url)

+             scm_object_type, scm_object_id, scm_object_url, submitted_by=agent_url)

  

      @classmethod

      def get_candidates_for_rebuild(cls, clone_url):
@@ -163,6 +163,7 @@

          'branch_to': data['msg']['pullrequest']['branch'],

          'start_commit': data['msg']['pullrequest']['commit_start'],

          'end_commit': data['msg']['pullrequest']['commit_stop'],

+         'agent': data['msg']['agent'],

      })

  

  
@@ -184,6 +185,7 @@

          'branch_to': data['msg']['pullrequest']['branch'],

          'start_commit': data['msg']['pullrequest']['commit_start'],

          'end_commit': data['msg']['pullrequest']['commit_stop'],

+         'agent': data['msg']['agent'],

      })

  

  
@@ -205,6 +207,7 @@

          'branch_to': data['msg']['branch'],

          'start_commit': data['msg']['start_commit'],

          'end_commit': data['msg']['end_commit'],

+         'agent': data['msg']['agent'],

      })

  

  
@@ -334,7 +337,8 @@

                          update_callback,

                          event_info.object_type,

                          event_info.object_id,

-                         scm_object_url

+                         scm_object_url,

+                         "{}user/{}".format(base_url, event_info.agent),

                      )

                      if build:

                          log.info('\t -> {}'.format(build.to_dict()))
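
Illustrative only: with the pagure-events change above, the submitted_by value passed down to rebuild_package is built from the instance base URL and the event agent (values invented):

    base_url = 'https://pagure.io/'  # assumed to end with a slash
    agent = 'jdoe'                   # event_info.agent
    submitted_by = '{}user/{}'.format(base_url, agent)
    # -> 'https://pagure.io/user/jdoe'
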

no initial comment

7 new commits added

  • backend: print sandbox/builds_count in cleanup_vm_nova.py output
  • backend: really skip VMs with check failure
  • backend: remove unused method get_dirty_vms
  • backend, frontend: sandbox per submitted_by field
  • frontend: fill build.submitted_by for GitHub, GitLab, Bitbucket
  • frontend: fill build.submitted_by for pagure events
  • frontend: nicer submitter identification
4 years ago

Man, this one must have been a pain to implement.
I tried to review it and it looks good to me, but it is such a complex change that I might have missed something. Anyway, I would merge it.

I might have missed something as well, but we still have some time to test this on staging, and we have unit tests and sanity tests... is that a +1? I guess we don't have to wait with the merge until after the meeting discussion.

rebased onto dbd6ce0

4 years ago

Pull-Request has been merged by frostyx

4 years ago