#1845 TESTING PR FOR THE CI PURPOSES, DON'T MERGE
Closed 2 years ago by praiskup. Opened 2 years ago by praiskup.
Unknown source: ci-testing-pr into main

file modified
+1
@@ -15,6 +15,7 @@

  _tmp/

  .cache/

  .pytest_cache/

+ .localwrap/

  

  backend/docs/build

  

@@ -22,7 +22,7 @@

      Transformate some function definitions so pylint doesn't object.

      """

      if function.name == 'logger':

-         for prop in ['debug', 'info', 'warning', 'error']:

+         for prop in ['debug', 'info', 'warning', 'error', 'exception']:

              function.instance_attrs[prop] = extract_node('def {name}(arg): return'.format(name=prop))

  

      if function.name in ["upgrade", "downgrade"]:

@@ -17,6 +17,9 @@

  # no default

  destdir=/var/lib/copr/public_html/results

  

+ # Periodically generated statistics/graphs go here

+ statsdir=/var/lib/copr/public_html/stats

+ 

  # how long (in seconds) backend should wait before query frontends

  # for new tasks in queue

  # default is 10

file renamed: backend/conf/crontab/copr-backend -> backend/conf/crontab/daily
file was moved with no change to the file
@@ -0,0 +1,3 @@

+ #! /bin/sh

+ 

+ runuser -c "/usr/bin/copr-backend-analyze-results && /usr/bin/copr-backend-generate-graphs" - copr >&2 >/dev/null

file modified
+6 -1
@@ -83,6 +83,7 @@

  Requires:   python3-fedmsg

  Requires:   python3-gobject

  Requires:   python3-humanize

+ Requires:   python3-jinja2

  Requires:   python3-libmodulemd1 >= 1.7.0

  Requires:   python3-munch

  Requires:   python3-netaddr
@@ -99,6 +100,7 @@

  Requires:   rpm-sign

  Requires:   rsync

  Requires:   modulemd-tools >= 0.6

+ Requires:   zstd

  

  Requires(post): systemd

  Requires(preun): systemd
@@ -144,6 +146,7 @@

  install -d %{buildroot}/%{_tmpfilesdir}

  install -d %{buildroot}/%{_sbindir}

  install -d %{buildroot}%{_sysconfdir}/cron.daily

+ install -d %{buildroot}%{_sysconfdir}/cron.weekly

  install -d %{buildroot}%{_sysconfdir}/sudoers.d

  install -d %{buildroot}%{_bindir}/

  
@@ -151,7 +154,8 @@

  cp -a run/* %{buildroot}%{_bindir}/

  cp -a conf/copr-be.conf.example %{buildroot}%{_sysconfdir}/copr/copr-be.conf

  

- install -p -m 755 conf/crontab/copr-backend %{buildroot}%{_sysconfdir}/cron.daily/copr-backend

+ install -p -m 755 conf/crontab/daily %{buildroot}%{_sysconfdir}/cron.daily/copr-backend

+ install -p -m 755 conf/crontab/weekly  %{buildroot}%{_sysconfdir}/cron.weekly/copr-backend

  

  cp -a conf/lighttpd/* %{buildroot}%{_pkgdocdir}/lighttpd/

  cp -a conf/logrotate/* %{buildroot}%{_sysconfdir}/logrotate.d/
@@ -225,6 +229,7 @@

  %{_sbindir}/*

  

  %config(noreplace) %{_sysconfdir}/cron.daily/copr-backend

+ %config(noreplace) %{_sysconfdir}/cron.weekly/copr-backend

  %{_datadir}/logstash/patterns/lighttpd.pattern

  

  

@@ -0,0 +1,46 @@

+ """

+ The Copr project globals to simplify access to config, logging, etc.

+ """

+ 

+ import logging

+ import os

+ from copr_backend.helpers import (

+     BackendConfigReader,

+     get_redis_log_handler,

+     RedisPublishHandler,

+ )

+ 

+ 

+ class App:

+     """

+     Shortcut.

+     """

+     log = None

+ 

+     def __init__(self):

+         self._setup_logging()

+         self.config_file = os.environ.get("BACKEND_CONFIG",

+                                           "/etc/copr/copr-be.conf")

+         self.opts = BackendConfigReader(self.config_file).read()

+ 

+     def _setup_logging(self):

+         logging.basicConfig(level=logging.DEBUG)

+         self.log = logging.getLogger()

+         self.log.setLevel(logging.DEBUG)

+ 

+     def setup_redis_logging(self, filename):

+         """

+         Setup a multiprocessing logger to a file inside /var/log/copr-backend,

+         using the Redis handler.

+         """

+         for handler in self.log.handlers:

+             if isinstance(handler, RedisPublishHandler):

+                 return

+         self.log.addHandler(get_redis_log_handler(self.opts, filename))

+ 

+     def redirect_to_redis_log(self, filename):

+         """

+         Drop all handlers from self.log, and add one going through Redis

+         """

+         self.log.handlers = []

+         self.setup_redis_logging(filename)

@@ -31,7 +31,7 @@

  

  MAX_HOST_ATTEMPTS = 3

  MAX_SSH_ATTEMPTS = 5

- MIN_BUILDER_VERSION = "0.49.1.dev"

+ MIN_BUILDER_VERSION = "0.51.1.dev"

  CANCEL_CHECK_PERIOD = 5

  

  MESSAGES = {
@@ -291,7 +291,8 @@

              return

  

          path = os.path.join(self.job.results_dir, "results.json")

-         assert os.path.exists(path)

+         if not os.path.exists(path):

+             raise BackendError("results.json file not found in resultdir")

          with open(path, "r") as f:

              results = json.load(f)

          self.job.results = results
@@ -667,7 +668,6 @@

                  build_details = {

                      "built_packages": self._collect_built_packages(job),

                  }

-                 self._parse_results()

              self.log.info("build details: %s", build_details)

          except Exception as e:

              raise BackendError(
@@ -735,6 +735,7 @@

              if self.opts.do_sign:

                  self._sign_built_packages()

              self._do_createrepo()

+             self._parse_results()

              build_details = self._get_build_details(self.job)

              self.job.update(build_details)

              self._add_pubkey()

@@ -176,7 +176,7 @@

  

              rpms_to_remove.update(task_opts["rpms_to_remove"])

  

-             if len(self.notify_keys) >= MAX_IN_BATCH:

+             if len(self.notify_keys) >= MAX_IN_BATCH - 1:  # one is ours!

                  self.log.info("Batch copr-repo limit %s reached, skip the rest",

                                MAX_IN_BATCH)

                  break
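
A note on the off-by-one above: the worker's own createrepo request never sits in notify_keys, so letting notify_keys grow to MAX_IN_BATCH would make the processed batch one item larger than the limit. A tiny arithmetic sketch (the MAX_IN_BATCH value is illustrative, not taken from this diff):

    MAX_IN_BATCH = 10                       # illustrative value only
    merged_from_queue = MAX_IN_BATCH - 1    # requests taken over from notify_keys
    our_own_request = 1                     # the request this worker already owns
    assert merged_from_queue + our_own_request == MAX_IN_BATCH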

@@ -25,10 +25,7 @@

          self.log_dir = os.path.dirname(self.opts.log_dir)

          if not os.path.exists(self.log_dir):

              os.makedirs(self.log_dir, mode=0o750)

- 

-         self.components = ["spawner", "terminator", "vmm", "build_dispatcher",

-                            "action_dispatcher", "backend", "actions", "worker",

-                            "modifyrepo", "pruner"]

+         self.components = helpers.LOG_COMPONENTS

  

      def setup_logging(self):

  

@@ -38,6 +38,12 @@

  from . import constants

  

  

+ LOG_COMPONENTS = [

+     "spawner", "terminator", "vmm", "build_dispatcher", "action_dispatcher",

+     "backend", "actions", "worker", "modifyrepo", "pruner", "analyze-results",

+ ]

+ 

+ 

  def pyconffile(filename):

      """

      Load python file as configuration file, inspired by python-flask
@@ -354,6 +360,13 @@

          opts.log_format = _get_conf(

              cp, "backend", "log_format", default_log_format)

  

+         opts.statsdir = _get_conf(

+             cp, "backend", "statsdir", "/var/lib/copr/public_html/stats")

+ 

+         opts.stats_templates_dir = _get_conf(

+             cp, "backend", "stats_templates_dir",

+             os.path.join(os.path.dirname(__file__), "stats_templates"))

+ 

          opts.prune_days = _get_conf(cp, "backend", "prune_days", None, mode="int")

  

          # ssh options
@@ -493,6 +506,21 @@

              sys.stderr.write("Failed to publish log record to redis, {}"

                               .format(format_tb(error, ex_tb)))

  

+ def get_redis_log_handler(opts, component):

+     """

+     Get RedisPublishHandler object sending the events to Redis.

+ 

+     :param opts: BackendConfigReader.read() output dict.

+     :param component: Name of the component this handler logs for; it also

+         determines the log file under /var/log/copr-backend.  When the

+         component is 'pruner', the file will be 'pruner.log'.

+     """

+     assert component in LOG_COMPONENTS

+     rc = get_redis_connection(opts)

+     # level=DEBUG, by default we send everything logger gives us

+     handler = RedisPublishHandler(rc, component, level=logging.DEBUG)

+     return handler

+ 

  

  def get_redis_logger(opts, name, who):

      logger = logging.getLogger(name)
@@ -501,9 +529,7 @@

      logger.setLevel(level)

  

      if not logger.handlers:

-         rc = get_redis_connection(opts)

-         handler = RedisPublishHandler(rc, who, level=logging.DEBUG)

-         logger.addHandler(handler)

+         logger.addHandler(get_redis_log_handler(opts, who))

  

      return logger

  

@@ -69,6 +69,9 @@

          self.results = None

          self.appstream = None

  

+         # {"packages": ...}

+         self.results = None

+ 

          # TODO: validate update data, user marshmallow

          for key, val in task_data.items():

              key = str(key)

@@ -0,0 +1,14 @@

+ """

+ Singleton-like objects to simplify ubiquitous Copr Backend configuration.

+ """

+ 

+ from copr_backend.app import App

+ 

+ # the application

+ app = App()

+ 

+ # configuration, usually in /etc/copr/copr-be.conf

+ config = app.opts

+ 

+ # the default logger

+ log = app.log
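
For orientation, a minimal sketch of how a backend script is expected to consume this new module; the import path and the "analyze-results" component name come from the scripts added later in this PR, the rest is illustrative:

    from copr_backend.setup import app, config, log

    def main():
        # 'config' is the parsed /etc/copr/copr-be.conf (BackendConfigReader output)
        log.info("analyzing results under %s", config.destdir)

    if __name__ == "__main__":
        # route log records through Redis into /var/log/copr-backend/analyze-results.log
        app.redirect_to_redis_log("analyze-results")
        main()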

empty or binary file added
@@ -0,0 +1,94 @@

+ <!doctype html>

+ <html>

+ 

+ {% set title="Storage consumption per architecture" %}

+ 

+ <head>

+     <title>{{ title }}</title>

+     <script src="Chart.bundle.js"></script>

+     <script src="utils.js"></script>

+     <script src="moment.min.js"></script>

+     <style>

+     canvas {

+         -moz-user-select: none;

+         -webkit-user-select: none;

+         -ms-user-select: none;

+     }

+     </style>

+ </head>

+ 

+ <body>

+     <div style="width:75%">

+         <canvas id="canvas"></canvas>

+     </div>

+     <script>

+         var color = Chart.helpers.color;

+         var scatterChartData = {

+             datasets: {{ datasets|tojson }}

+         };

+ 

+         function setup_colors(value, index, array) {

+             value["borderColor"] = [

+                 window.chartColors.blue,

+                 window.chartColors.red,

+                 window.chartColors.green,

+                 window.chartColors.grey,

+                 window.chartColors.yellow,

+                 window.chartColors.orange,

+                 window.chartColors.purple,

+                 window.chartColors.blue,

+             ][index];

+         };

+ 

+         window.onload = function() {

+ 

+             console.log(window.chartColors);

+ 

+             scatterChartData["datasets"].forEach(setup_colors);

+ 

+             var ctx = document.getElementById("canvas").getContext("2d");

+             window.myScatter = Chart.Scatter(ctx, {

+                 data: scatterChartData,

+                 options: {

+                     title: {

+                         display: true,

+                         text: '{{ title }}',

+                     },

+                     tooltips: {

+                         callbacks: {

+                             label: function(item, data) {

+                                 index = item.datasetIndex;

+                                 dataset = data.datasets[index];

+                                 return dataset.label

+                                        + " => " + new Date(item.xLabel).toISOString().slice(0,10)

+                                        + " => " + Math.round(item.yLabel / 1024 / 1024 * 10)/10 + " GB";

+                             }

+                         }

+                     },

+                     scales: {

+                         xAxes: [{

+                             ticks: {

+                                 callback: (value) => {

+                                     return new Date(value).toLocaleDateString(

+                                         "en-US",

+                                         {month: "short", year: "numeric"}

+                                     );

+                                 }

+                             }

+                         }],

+                         yAxes: [{

+                             ticks: {

+                                 callback: (value) => {

+                                     return Math.round(value / 1024 / 1024 *10)/10 + " GB";

+                                 }

+                             }

+                         }]

+ 

+                     }

+                 }

+             });

+         };

+     </script>

+ </body>

+ 

+ </html>

@@ -0,0 +1,76 @@

+ <!doctype html>

+ <html>

+ 

+ {% set title="Storage consumption per architecture" %}

+ 

+ <head>

+     <title>{{ title }}</title>

+     <script src="Chart.bundle.js"></script>

+     <script src="utils.js"></script>

+     <script src="moment.min.js"></script>

+     <style>

+     canvas {

+         -moz-user-select: none;

+         -webkit-user-select: none;

+         -ms-user-select: none;

+     }

+     </style>

+ </head>

+ 

+ <body>

+     <div style="width:75%">

+         <canvas id="canvas"></canvas>

+     </div>

+     <script>

+         var color = Chart.helpers.color;

+         var scatterChartData = {

+             datasets: {{ datasets|tojson }}

+         };

+ 

+         window.onload = function() {

+             var ctx = document.getElementById("canvas").getContext("2d");

+             window.myScatter = Chart.Scatter(ctx, {

+                 data: scatterChartData,

+                 options: {

+                     title: {

+                         display: true,

+                         text: '{{ title }}',

+                     },

+                     tooltips: {

+                         callbacks: {

+                             label: function(item, data) {

+                                 index = item.datasetIndex;

+                                 dataset = data.datasets[index];

+                                 return dataset.label

+                                        + " => " + new Date(item.xLabel).toISOString().slice(0,10)

+                                        + " => " + Math.round(item.yLabel / 1024 / 1024 * 10)/10 + " GB";

+                             }

+                         }

+                     },

+                     scales: {

+                         xAxes: [{

+                             ticks: {

+                                 callback: (value) => {

+                                     return new Date(value).toLocaleDateString(

+                                         "en-US",

+                                         {month: "short",year: "numeric"}

+                                     );

+                                 }

+                             }

+                         }],

+                         yAxes: [{

+                             ticks: {

+                                 callback: (value) => {

+                                     return Math.round(value / 1024 / 1024 *10)/10 + " GB";

+                                 }

+                             }

+                         }]

+ 

+                     }

+                 }

+             });

+         };

+     </script>

+ </body>

+ 

+ </html>

@@ -0,0 +1,91 @@

+ <!doctype html>

+ <html>

+ 

+ {% set title="Storage consumption per distribution" %}

+ 

+ <head>

+     <title>{{ title }}</title>

+     <script src="Chart.bundle.js"></script>

+     <script src="utils.js"></script>

+     <script src="moment.min.js"></script>

+     <style>

+     canvas {

+         -moz-user-select: none;

+         -webkit-user-select: none;

+         -ms-user-select: none;

+     }

+     </style>

+ </head>

+ 

+ <body>

+     <div style="width:75%">

+         <canvas id="canvas"></canvas>

+     </div>

+     <script>

+         var color = Chart.helpers.color;

+         var scatterChartData = {

+             datasets: {{ datasets|tojson }}

+         };

+ 

+         function fix_dataset(value, index, array) {

+             if (value["label"].includes("fedora")) {

+                 value["borderColor"] =  window.chartColors.blue;

+             }

+             if (value["label"].includes("epel")) {

+                 value["borderColor"] =  window.chartColors.yellow;

+             }

+             if (value["label"].includes("centos")) {

+                 value["borderColor"] =  window.chartColors.red;

+             }

+         };

+ 

+         window.onload = function() {

+ 

+             scatterChartData["datasets"].forEach(fix_dataset);

+ 

+             var ctx = document.getElementById("canvas").getContext("2d");

+             window.myScatter = Chart.Scatter(ctx, {

+                 data: scatterChartData,

+                 options: {

+                     title: {

+                         display: true,

+                         text: '{{ title }}',

+                     },

+                     tooltips: {

+                         callbacks: {

+                             label: function(item, data) {

+                                 index = item.datasetIndex;

+                                 dataset = data.datasets[index];

+                                 return dataset.label

+                                        + " => " + new Date(item.xLabel).toISOString().slice(0,10)

+                                        + " => " + Math.round(item.yLabel / 1024 / 1024 * 10)/10 + " GB";

+                             }

+                         }

+                     },

+                     scales: {

+                         xAxes: [{

+                             ticks: {

+                                 callback: (value) => {

+                                     return new Date(value).toLocaleDateString(

+                                         "en-US",

+                                         {month: "short",year: "numeric"}

+                                     );

+                                 }

+                             }

+                         }],

+                         yAxes: [{

+                             ticks: {

+                                 callback: (value) => {

+                                     return Math.round(value / 1024 / 1024 *10)/10 + " GB";

+                                 }

+                             }

+                         }]

+ 

+                     }

+                 }

+             });

+         };

+     </script>

+ </body>

+ 

+ </html>

@@ -0,0 +1,27 @@

+ <!doctype html>

+ <html>

+ 

+ {% set title = "Copr Backend Storage Statistics" %}

+ 

+ <head>

+     <title>{{ title }}</title>

+     <script src="Chart.bundle.js"></script>

+     <script src="utils.js"></script>

+     <script src="moment.min.js"></script>

+     <style>

+     canvas {

+         -moz-user-select: none;

+         -webkit-user-select: none;

+         -ms-user-select: none;

+     }

+     </style>

+ </head>

+ <body>

+     <h1>{{ title }}</h1>

+     <a href="distro.html">Distro consumption</a><br />

+     <a href="chroots.html">Chroot consumption</a><br />

+     <a href="arches.html">Architecture consumption</a><br />

+     <a href="owners.html">Most consuming owners</a><br />

+     <a href="projects.html">Most consuming projects</a><br />

+ </body>

+ </html>

@@ -0,0 +1,92 @@

+ <!doctype html>

+ <html>

+ 

+ <head>

+     <title>{{ title }}</title>

+     <script src="Chart.bundle.js"></script>

+     <script src="utils.js"></script>

+     <script src="moment.min.js"></script>

+     <style>

+     canvas {

+         -moz-user-select: none;

+         -webkit-user-select: none;

+         -ms-user-select: none;

+     }

+     </style>

+ </head>

+ 

+ <body>

+     <div style="width:75%">

+         <canvas id="canvas"></canvas>

+     </div>

+     <script>

+         var color = Chart.helpers.color;

+         var scatterChartData = {

+             datasets: {{ datasets|tojson }}

+         };

+ 

+         function setup_colors(value, index, array) {

+             value["borderColor"] = [

+                 window.chartColors.blue,

+                 window.chartColors.red,

+                 window.chartColors.green,

+                 window.chartColors.grey,

+                 window.chartColors.yellow,

+                 window.chartColors.orange,

+                 window.chartColors.purple,

+                 window.chartColors.blue,

+             ][index];

+         };

+ 

+         window.onload = function() {

+ 

+             console.log(window.chartColors);

+ 

+             scatterChartData["datasets"].forEach(setup_colors);

+ 

+             var ctx = document.getElementById("canvas").getContext("2d");

+             window.myScatter = Chart.Scatter(ctx, {

+                 data: scatterChartData,

+                 options: {

+                     title: {

+                         display: true,

+                         text: '{{ title }}',

+                     },

+                     tooltips: {

+                         callbacks: {

+                             label: function(item, data) {

+                                 index = item.datasetIndex;

+                                 dataset = data.datasets[index];

+                                 return dataset.label

+                                        + " => " + new Date(item.xLabel).toISOString().slice(0,10)

+                                        + " => " + Math.round(item.yLabel / 1024 / 1024 * 10)/10 + " GB";

+                             }

+                         }

+                     },

+                     scales: {

+                         xAxes: [{

+                             ticks: {

+                                 callback: (value) => {

+                                     return new Date(value).toLocaleDateString(

+                                         "en-US",

+                                         {month: "short",year: "numeric"}

+                                     );

+                                 }

+                             }

+                         }],

+                         yAxes: [{

+                             ticks: {

+                                 callback: (value) => {

+                                     return Math.round(value / 1024 / 1024 *10)/10 + " GB";

+                                 }

+                             }

+                         }]

+ 

+                     }

+                 }

+             });

+         };

+     </script>

+ </body>

+ 

+ </html>

file added
+34
@@ -0,0 +1,34 @@

+ #! /bin/bash

+ 

+ scriptdir=$(dirname "$(readlink -f "$0")")

+ localwrap_workdir=$scriptdir/.localwrap

+ 

+ new_paths=$scriptdir:$scriptdir/../common

+ export PYTHONPATH=${PYTHONPATH+$PYTHONPATH:}$new_paths

+ export BACKEND_CONFIG=$localwrap_workdir/backend.conf

+ 

+ die() { echo >&2 "$*" ; exit 1; }

+ 

+ prepare_workdir()

+ {

+     resultdir=$localwrap_workdir/results

+     logdir=$localwrap_workdir/logs

+     statsdir=$localwrap_workdir/stats

+     mkdir -p "$resultdir"

+     cat > "$BACKEND_CONFIG" <<EOF

+ [backend]

+ destdir = $resultdir

+ log_dir = $logdir

+ statsdir = $statsdir

+ EOF

+     mkdir -p "$resultdir/adam/workspace/fedora-rawhide-x86_64/00049143-foo"

+     touch    "$resultdir/adam/workspace/fedora-rawhide-x86_64/00049143-foo/foo.rpm"

+     mkdir -p "$resultdir/bob/sandbox/epel-7-x86_64/00849143-copr-builder"

+     touch    "$resultdir/bob/sandbox/epel-7-x86_64/00849143-copr-builder/copr-builder"

+     mkdir -p "$statsdir"

+ }

+ 

+ test $# -eq 0 && die "What command do you want to wrap?"

+ 

+ prepare_workdir

+ exec "$@"

@@ -0,0 +1,267 @@

+ #! /usr/bin/python3

+ 

+ """

+ Analyze the Copr Backend resultdir storage usage.

+ """

+ 

+ import argparse

+ from datetime import datetime

+ import json

+ import os

+ import pipes

+ import subprocess

+ import time

+ 

+ import humanize

+ 

+ from copr_backend.setup import app, log, config

+ 

+ 

+ def get_arg_parser():

+     """ Return an argument parser """

+     parser = argparse.ArgumentParser(

+         description="Analyze contents of copr-backend resultdir.  Print "

+                     "the statistics to STDOUT")

+     parser.add_argument(

+         "--log-to-stderr",

+         action="store_true",

+         help=("Print logging output to the STDERR instead of log file"))

+     parser.add_argument(

+         "--stdout",

+         action="store_true",

+         help=("Don't dump the statistics to statsdir, but to STDOUT"))

+     parser.add_argument(

+         "--custom-du-command",

+         help="By default we run 'du -x $resultdir', use this for override",

+     )

+     parser.add_argument(

+         "--log-progress-delay",

+         type=int,

+         metavar="SECONDS",

+         help="Print progress info with SECONDS period",

+         default=30,

+     )

+     parser.add_argument(

+         "--output-filename",

+         help="The stats file basename")

+     return parser

+ 

+ 

+ def get_stdout_line(command, binary=False, **kwargs):

+     """

+     Run COMMAND, read its stdout line-by-line, and yield each line.  The

+     kwargs argument is passed down to Popen() call.

+     """

+ 

+     # Per Python Popen help

+     # ---------------------

+     # If you experience performance issues, it is recommended that you try to

+     # enable buffering by setting bufsize to either -1 or a large enough

+     # positive value (such as 4096).

+     assert "universal_newlines" not in kwargs

+ 

+     sentinel = b'' if binary else ''

+     kwargs['universal_newlines'] = not binary

+ 

+     process = subprocess.Popen(command, bufsize=-1, stdout=subprocess.PIPE,

+                                **kwargs)

+     for line in iter(process.stdout.readline, sentinel):

+         yield line

+ 

+ 
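A minimal usage sketch for the generator above (the du invocation is only an example; any command accepted by Popen works):

    for line in get_stdout_line(["du", "-x", "/var/lib/copr/public_html/results"]):
        # binary=False (the default) yields text lines, trailing newline included
        size, path = line.rstrip("\n").split("\t")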

+ class Stats:

+     """ Calculators. """

+     def __init__(self, name, print_items=None):

+         self.data = {}

+         self.name = name

+         self.print_items = print_items

+ 

+     def add(self, key, size):

+         """

+         Append size to the KEY counter, create the counter if it doesn't exist.

+         """

+         if key not in self.data:

+             self.data[key] = 0

+         self.data[key] += size

+ 

+     @property

+     def dict(self):

+         """ Return the stats dictionary """

+         return self.data

+ 

+     def _sorted_iterator(self):

+         for key, value in sorted(self.data.items(), key=lambda item: -item[1]):

+             yield key, value

+ 

+     def log(self, items=None):

+         """ Print the stats in in a nice format """

+ 

+         print_items = self.print_items

+         if items is not None:

+             print_items = items

+ 

+         log.info("=== printing %s ===", self.name)

+         for key, value in self._sorted_iterator():

+             value = humanize.naturalsize(value*1024)

+             log.info("%s %s", key, value)

+             if print_items is not None:

+                 print_items -= 1

+                 if print_items <= 0:

+                     break

+ 

+     def log_line(self, items=None):

+         """ self.print() but more compressed """

+ 

+         print_items = self.print_items

+         if items is not None:

+             print_items = items

+ 

+         line = "{}: ".format(self.name)

+         to_print = []

+         for key, value in self._sorted_iterator():

+             value = humanize.naturalsize(value*1024)

+             to_print.append("{}: {}".format(key, value))

+             if print_items is not None:

+                 print_items -= 1

+                 if print_items <= 0:

+                     break

+         log.info("%s", line + ", ".join(to_print))

+ 

+ 
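A short usage sketch of the Stats counter above; the chroot name and sizes are invented, and the rendered size string is only approximate:

    chroots = Stats("chroots", print_items=5)
    chroots.add("fedora-34-x86_64", 2048)   # sizes are kilobytes (du output)
    chroots.add("fedora-34-x86_64", 1024)   # repeated keys accumulate
    chroots.log_line()                      # roughly: "chroots: fedora-34-x86_64: 3.1 MB"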

+ class TimeToPrint:

+     """ Helper class to estimate the time when the output should be printed """

+     def __init__(self, time_check_each=1000, print_per_seconds=3):

+         self.checks = 0

+         self.check_each = time_check_each

+         self.each_second = print_per_seconds

+         self.last_print = None

+ 

+     def should_print(self):

+         """ Return True if it is a time to print """

+         self.checks += 1

+         if not self.last_print:

+             self.last_print = time.time()

+         if self.checks % self.check_each:

+             return False

+         now = time.time()

+         if now > self.last_print + self.each_second:

+             self.last_print = now

+             return True

+         return False

+ 

+ 
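And a sketch of how the throttle class above is meant to be used inside a tight loop (the numbers are illustrative):

    checker = TimeToPrint(print_per_seconds=30)
    for _ in range(10_000_000):
        if checker.should_print():      # time.time() is consulted only on every 1000th call
            log.info("still crunching du output ...")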

+ def compress_file(filename):

+     """ Zstd-compress filename """

+     log.info("Compressing the %s file", filename)

+     compress_cmd = ["zstd", "--rm", filename]

+     subprocess.check_call(compress_cmd)

+ 

+ 

+ def _main(arguments):

+     # pylint: disable=too-many-locals,too-many-statements,too-many-branches

+     resultdir = os.path.normpath(config.destdir)

+ 

+     command = "du -x " + pipes.quote(resultdir)

+     if arguments.custom_du_command:

+         command = arguments.custom_du_command

+ 

+     datadir = os.path.join(config.statsdir, "samples")

+     try:

+         os.makedirs(datadir)

+     except FileExistsError:

+         pass

+ 

+     timestamp = datetime.utcnow().isoformat()

+ 

+     full_du_log = os.path.join(

+         datadir,

+         timestamp + ".du.log")

+ 

+ 

+     stats_file = os.path.join(

+         datadir,

+         timestamp + ".json")

+ 

+     if arguments.output_filename:

+         # We probably consume pre-existing du log, so no need to create yet

+         # another one.

+         full_du_log = "/dev/null"

+         stats_file  = arguments.output_filename

+ 

+     chroots = Stats("chroots", 5)

+     arches = Stats("arches")

+     owners = Stats("owners", 5)

+     projects = Stats("projects", 5)

+     distros = Stats("distros", 6)

+ 

+     all_stats = [chroots, arches, owners, projects, distros]

+ 

+     checker = TimeToPrint(print_per_seconds=arguments.log_progress_delay)

+ 

+     with open(full_du_log, "w") as du_log_fd:

+         for line in get_stdout_line(command, shell=True):

+             # copy the line

+             du_log_fd.write(line)

+ 

+             if checker.should_print():

+                 log.info("=== analyzing period (each %s seconds) ===",

+                          arguments.log_progress_delay)

+                 for stat in all_stats:

+                     stat.log_line()

+ 

+             line = line.strip()

+ 

+             # du format is 'size<tab>path'

+             kbytes, path = line.split('\t')

+             kbytes = int(kbytes)

+ 

+             if not path.startswith(resultdir):

+                 continue

+ 

+             relpath = path[len(resultdir)+1:]

+             if not relpath:

+                 continue

+ 

+             parts = relpath.split("/")

+             if len(parts) == 1:

+                 owner = parts[0]

+                 owners.add(owner, kbytes)

+                 continue

+ 

+             if len(parts) == 2:

+                 project = "/".join(parts)

+                 projects.add(project, kbytes)

+                 continue

+ 

+             if len(parts) == 3:

+                 chroot = parts[-1]

+                 distro = "-".join(parts[:-1])

+                 chroots.add(chroot, kbytes)

+ 

+                 if chroot not in ["srpm-builds", "modules", "repodata"]:

+                     distro, arch = chroot.rsplit("-", 1)

+                     arches.add(arch, kbytes)

+                     distros.add(distro, kbytes)

+ 

+     if full_du_log != "/dev/null":

+         compress_file(full_du_log)

+ 

+     data = {}

+     for stats in all_stats:

+         data[stats.name] = stats.data

+ 

+     output = json.dumps(data, indent=4, sort_keys=True)

+     if arguments.stdout:

+         print(output)

+     else:

+         with open(stats_file, "w+") as file:

+             file.write(output)

+ 

+         compress_file(stats_file)

+ 

+ 

+ if __name__ == "__main__":

+     args = get_arg_parser().parse_args()

+     if not args.log_to_stderr:

+         app.redirect_to_redis_log("analyze-results")

+     _main(args)
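
To make the parsing loop above easier to review, here is a hedged walk-through of a single `du -x` row (the path and size are made up; the resultdir matches the config default shown earlier in this PR):

    # du prints "size<TAB>path", sizes in 1 KiB blocks
    line = "1234\t/var/lib/copr/public_html/results/bob/sandbox/epel-7-x86_64"
    resultdir = "/var/lib/copr/public_html/results"
    kbytes, path = line.strip().split("\t")
    parts = path[len(resultdir) + 1:].split("/")   # ["bob", "sandbox", "epel-7-x86_64"]
    # len(parts) == 3, so this row adds 1234 KiB to chroots["epel-7-x86_64"],
    # arches["x86_64"] and distros["epel-7"]; one-component paths feed owners[...],
    # two-component paths feed projects[...]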

@@ -0,0 +1,313 @@

+ #! /usr/bin/python3

+ 

+ """

+ Go to statsdir, gather all the available statistics and draw a set of related

+ graphs.

+ """

+ 

+ import argparse

+ import json

+ import pipes

+ import subprocess

+ import os

+ import dateutil.parser as dp

+ from jinja2 import Template

+ from copr_backend.setup import app, config, log

+ 

+ DOWNLOAD_JS_FILES = [

+     "https://www.chartjs.org/dist/2.6.0/Chart.bundle.js",

+     "https://www.chartjs.org/samples/2.6.0/utils.js",

+     "https://momentjs.com/downloads/moment.min.js",

+ ]

+ 

+ def get_arg_parser():

+     """ Return an argument parser """

+     parser = argparse.ArgumentParser(

+         description="Read pre-generated stats, and generate graphs.")

+     parser.add_argument(

+         "--log-to-stderr",

+         action="store_true",

+         help=("Print logging output to the STDERR instead of log file"))

+     return parser

+ 

+ 

+ def stamp2js(stamp):

+     """ JS works with miliseconds, and we know we work with UTC """

+     return dp.parse(stamp + "Z").timestamp()*1000

+ 

+ 
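A worked example of the conversion above (the timestamp is only an example, in the same naive-UTC isoformat the sample files use):

    stamp2js("2021-06-02T07:19:47.605140")   # -> 1622618387605.14 (milliseconds since the epoch)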

+ def download_js_files():

+     """

+     Download static JavaScript files

+     """

+     for js_file in DOWNLOAD_JS_FILES:

+         basename = os.path.basename(js_file)

+         local_file = os.path.join(config.statsdir, basename)

+         if os.path.exists(local_file):

+             continue

+         log.info("Downloading %s file", js_file)

+         subprocess.check_call(["wget", js_file, "-O", local_file])

+ 

+ 

+ def read_compressed_json(filename):

+     """

+     Read one json.zst file, and return its contents as dict

+     """

+     decompress = "zstd -d < {}".format(pipes.quote(filename))

+     string = subprocess.check_output(decompress, shell=True)

+     return json.loads(string)

+ 

+ 

+ def expand_template(template_name, destfile, **kwargs):

+     """ Expand the given J2 template with **KWARGS """

+     template = os.path.join(config.stats_templates_dir,

+                             template_name)

+     with open(template, "r") as fd:

+         contents = fd.read()

+     template = Template(contents)

+     destfile = os.path.join(config.statsdir, destfile)

+     with open(destfile, "w") as fd:

+         fd.write(template.render(**kwargs))

+ 

+ 

+ def hide_all_not_up2date(chroot, largest):

+     """

+     Decide which distros/chroots should be hidden (disabled) by default

+     """

+     limits = {

+         "epel": largest["epel"] - 3,

+         "fedora": largest["fedora"] - 4,

+     }

+ 

+     if "centos-stream" in chroot:

+         return False

+ 

+     parts = chroot.split("-")

+     if len(parts) != 2:

+         return True

+ 

+ 

+     if parts[0] in ["fedora", "epel"]:

+         if parts[1] in ["rawhide", "eln"]:

+             return False

+ 

+         return limits[parts[0]] >= int(parts[1])

+     return True

+ 
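A worked example of the rules above, assuming the newest versions found in the samples are fedora-34 and epel-8 (so limits becomes {"fedora": 30, "epel": 5}); the distro names are illustrative:

    largest = {"fedora": 34, "epel": 8}
    hide_all_not_up2date("fedora-rawhide", largest)    # False: rawhide/eln stay visible
    hide_all_not_up2date("fedora-33", largest)         # False: 30 >= 33 does not hold
    hide_all_not_up2date("fedora-29", largest)         # True:  30 >= 29, hidden by default
    hide_all_not_up2date("centos-stream-8", largest)   # False: centos-stream stays visible
    hide_all_not_up2date("mageia-8", largest)          # True:  other distros are hidden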

+ def get_distro_datasets(all_data):

+     """ Feed distros.html.j2 """

+     all_distros = set([])

+ 

+     distro_datasets = {}

+     for stamp in all_data:

+         all_distros = all_distros.union(set(all_data[stamp]["distros"].keys()))

+         for distro in all_distros:

+             distro_datasets[distro] = {

+                 "label": distro,

+                 "cubicInterpolationMode": "monotone",

+                 "data": [],

+                 "fill": False,

+             }

+ 

+     largest = {

+         "fedora": 0,

+         "epel": 0,

+     }

+     for distro in all_distros:

+         parts = distro.split("-")

+         name = parts[0]

+         version = parts[1]

+         if name in ["fedora", "epel"]:

+             try:

+                 new_val = int(version)

+                 if new_val > largest[name]:

+                     largest[name] = new_val

+             except ValueError:

+                 continue

+ 

+     for distro in all_distros:

+         distro_datasets[distro]["hidden"] = hide_all_not_up2date(distro, largest)

+ 

+     for stamp, data in list_dict_by_key(all_data):

+         for distro, storage in data["distros"].items():

+             distro_datasets[distro]["data"] += [{

+                 "x": stamp2js(stamp),

+                 "y": storage,

+             }]

+ 

+     result = []

+     for _, dataset in distro_datasets.items():

+         result += [dataset]

+     result.sort(key=lambda x: x['label'])

+     return result

+ 

+ 

+ def get_chroots_datasets(all_data):

+     """ Feed chroots.html.j2 """

+     all_chroots = set([])

+ 

+     chroot_datasets = {}

+     for stamp in all_data:

+         all_chroots = all_chroots.union(set(all_data[stamp]["chroots"].keys()))

+         for chroot in all_chroots:

+             chroot_datasets[chroot] = {

+                 "label": chroot,

+                 "hidden": "rawhide" not in chroot,

+                 "cubicInterpolationMode": "monotone",

+                 "data": [],

+             }

+ 

+     for stamp, data in list_dict_by_key(all_data):

+         for chroot, storage in data["chroots"].items():

+             chroot_datasets[chroot]["data"] += [{

+                 "x": stamp2js(stamp),

+                 "y": storage,

+             }]

+ 

+     result = []

+     for chroot, dataset in chroot_datasets.items():

+         result += [dataset]

+     result.sort(key=lambda x: x['label'])

+     return result

+ 

+ 

+ def get_arch_datasets(all_data):

+     """ Feed archs.html.j2 """

+     all_archs = set([])

+ 

+     arch_datasets = {}

+     for stamp in all_data:

+         all_archs = all_archs.union(set(all_data[stamp]["arches"].keys()))

+         for arch in all_archs:

+             arch_datasets[arch] = {

+                 "label": arch,

+                 "cubicInterpolationMode": "monotone",

+                 "data": [],

+             }

+ 

+     for stamp, data in list_dict_by_key(all_data):

+         for arch, storage in data["arches"].items():

+             arch_datasets[arch]["data"] += [{

+                 "x": stamp2js(stamp),

+                 "y": storage,

+             }]

+ 

+     result = []

+     for arch, dataset in arch_datasets.items():

+         result += [dataset]

+     result.sort(key=lambda x: x['label'])

+     return result

+ 

+ 
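For reviewers unfamiliar with Chart.js: each get_*_datasets() helper above returns a list shaped roughly like the sketch below, which is what the `datasets|tojson` expression in the .html.j2 templates serializes (labels and numbers are invented; some of the helpers additionally set "hidden" or "fill"):

    datasets = [
        {
            "label": "x86_64",
            "cubicInterpolationMode": "monotone",
            "data": [
                {"x": 1612137600000.0, "y": 123456},   # x = stamp2js(sample stamp), y = KiB
                {"x": 1614556800000.0, "y": 130000},
            ],
        },
    ]

The y values stay in du's kilobyte units; the templates divide by 1024*1024 when they render the "GB" axis labels.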

+ def list_dict_by_key(the_dict, reverse=False):

+     """ Iterate the given dict by key """

+     # keys are ISO timestamp strings, so lexicographic order is chronological

+     for key, value in sorted(the_dict.items(), reverse=reverse):

+         yield (key, value)

+ 

+ 

+ def list_dict_backwards(the_dict):

+     """ Iterate dictionary backwards, based on the value part (not key) """

+     for key, value in sorted(the_dict.items(), key=lambda item: -item[1]):

+         yield (key, value)

+ 

+ 

+ def get_topmost_dataset(all_data, stats_type, keep_from_each_set=15,

+                         show_from_last_set=5):

+     """

+     Get the datasets for top users/projects

+     """

+ 

+     last_dataset = all_data[sorted(list(all_data.keys()))[-1]][stats_type]

+ 

+     dont_hide = set()

+     keep_items = set()

+ 

+     for key, _ in list_dict_backwards(last_dataset):

+         show_from_last_set -= 1

+         dont_hide.add(key)

+         if show_from_last_set <= 0:

+             break

+ 

+     for _, day_data in all_data.items():

+         dataset = day_data[stats_type]

+         keep = keep_from_each_set

+         for item, _ in list_dict_backwards(dataset):

+             if keep <= 0:

+                 break

+             keep_items.add(item)

+             keep -= 1

+ 

+     final_order_values = dict()

+     for keep in keep_items:

+         final_order_values[keep] = last_dataset[keep]

+     final_order = [key for key, _ in list_dict_backwards(final_order_values)]

+ 

+     result_dict = {}

+     for keep in keep_items:

+         result_dict[keep] = {

+             "label": keep,

+             "cubicInterpolationMode": "monotone",

+             "data": [],

+         }

+ 

+     for stamp, data in list_dict_by_key(all_data):

+         dataset = data[stats_type]

+ 

+         for keep in keep_items:

+             result_dict[keep]["data"] += [{

+                 "x": stamp2js(stamp),

+                 "y": dataset.get(keep, 0),

+             }]

+ 

+     return_datasets = []

+     for label in final_order:

+         dataset = result_dict[label]

+         dataset["hidden"] = label not in dont_hide

+         return_datasets += [dataset]

+ 

+     return return_datasets

+ 

+ 
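A hedged, self-contained example of the selection rules above (two invented daily samples, deliberately small keep/show limits):

    all_data = {
        "2021-06-01T00:00:00": {"owners": {"alice": 50, "bob": 40, "carol": 1}},
        "2021-06-02T00:00:00": {"owners": {"alice": 10, "bob": 60, "carol": 2}},
    }
    sets = get_topmost_dataset(all_data, "owners",
                               keep_from_each_set=2, show_from_last_set=1)
    # -> two datasets ordered ["bob", "alice"] (by size in the newest sample);
    #    only "bob" has hidden == False, and "carol" never reaches a daily top-2,
    #    so she is dropped entirely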

+ def _main(_args):

+     download_js_files()

+     sampledir = os.path.join(config.statsdir, "samples")

+     files = os.listdir(sampledir)

+     json_files = sorted([sample for sample in files

+                          if sample.endswith('json.zst')])

+ 

+     all_data = {}

+     for file in json_files:

+         stamp = file[:-9]

+         abs_file = os.path.join(sampledir, file)

+         all_data[stamp] = read_compressed_json(abs_file)

+ 

+     expand_template("index.html.j2", "index.html")

+ 

+     datasets = get_distro_datasets(all_data)

+     expand_template("distro.html.j2", "distro.html",

+                     datasets=datasets)

+ 

+     datasets = get_chroots_datasets(all_data)

+     expand_template("chroots.html.j2", "chroots.html",

+                     datasets=datasets)

+ 

+     datasets = get_arch_datasets(all_data)

+     expand_template("arches.html.j2", "arches.html",

+                     datasets=datasets)

+ 

+     datasets = get_topmost_dataset(all_data, "projects")

+     expand_template("topmost.html.j2", "projects.html",

+                     datasets=datasets,

+                     title="Consumption per projects")

+ 

+     datasets = get_topmost_dataset(all_data, "owners")

+     expand_template("topmost.html.j2", "owners.html",

+                     datasets=datasets,

+                     title="Consumption per owners")

+ 

+ if __name__ == "__main__":

+     args = get_arg_parser().parse_args()

+     if not args.log_to_stderr:

+         app.redirect_to_redis_log("analyze-results")

+     _main(args)

file modified
+2 -1
@@ -29,7 +29,8 @@

  trap cleanup EXIT

  

  common_path=$(readlink -f ../common)

- export PYTHONPATH="$common_path:$PWD:$PWD/tests:$PWD/run${PYTHONPATH+:$PYTHONPATH}"

+ messaging_path=$(readlink -f ../messaging)

+ export PYTHONPATH="$common_path:$messaging_path:$PWD:$PWD/tests:$PWD/run${PYTHONPATH+:$PYTHONPATH}"

  export PATH="$PWD/run${PATH+:$PATH}"

  

  COVPARAMS='--cov-report term-missing --cov ./copr_backend --cov ./run'

file modified
+1
@@ -24,6 +24,7 @@

      url=__url__,

      license='GPLv2+',

      packages=find_packages(exclude=('tests*',)),

+     package_data={'': ['*.j2']},

      include_package_data=True,

      zip_safe=False,

  )

@@ -183,34 +183,52 @@

  

      def test_batched_createrepo_task_limit(self, caplog):

          some_dir = "/some/dir/name:pr:limit"

+ 

+         # request a createrepo run (devel == False!)

          bcr = self._prep_batched_repo(some_dir)

          key = bcr.make_request()

+         assert len(self.redis.keys()) == 1

  

-         # create limit +2 other requests, one is not to be processed, once

-         # skipped

+         # add 'add_1' task

          self.request_createrepo.get(some_dir)

+ 

+         # another background, not considered because devel == True

          self.request_createrepo.get(some_dir, {"add": ["add_2"], "devel": True})

+ 

+         # Fill-up the MAX_IN_BATCH quota with background requests, and add

+         # two more.

          for i in range(3, 3 + MAX_IN_BATCH):

              add_dir = "add_{}".format(i)

              self.request_createrepo.get(some_dir, {"add": [add_dir]})

  

-         # nobody processed us

+         # MAX_IN_BATCH + 2 more above + one is ours

+         assert len(self.redis.keys()) == MAX_IN_BATCH + 2 + 1

+ 

+         # Nobody processed us, drop us from DB

          assert not bcr.check_processed()

+         assert len(self.redis.keys()) == MAX_IN_BATCH + 2

  

-         expected = {"add_{}".format(i) for i in range(1, MAX_IN_BATCH + 2)}

+         # What directories should be processed at once?  Drop add_2 as it is

+         # devel=True.

+         expected = {"add_{}".format(i) for i in range(1, MAX_IN_BATCH + 3)}

          expected.remove("add_2")

-         assert len(expected) == MAX_IN_BATCH

+         assert len(expected) == MAX_IN_BATCH + 1

  

          full, add, remove, rpms_to_remove = bcr.options()

          assert (full, remove, rpms_to_remove) == (False, set(), set())

-         assert len(add) == MAX_IN_BATCH

+         # check that the batch is this request + (MAX_IN_BATCH - 1)

+         assert len(add) == MAX_IN_BATCH - 1

+ 

+         # The redis.keys() list isn't sorted, and even if it was - our own

+         # PID in the key would make the final order.  Therefore we don't know

+         # which items are skipped, but we know there are two left for the next

+         # batch.

+         assert len(expected-add) == 2

  

-         # The redis.keys() isn't sorted, and even if it was - PID would make the

-         # order.  Therefore we don't know which one is skipped, but only one is.

-         assert len(add - expected) == 1

-         assert len(expected - add) == 1

+         # Nothing unexpected should go here.

+         assert add-expected == set()

  

-         assert len(bcr.notify_keys) == MAX_IN_BATCH

+         assert len(bcr.notify_keys) == MAX_IN_BATCH - 1

          assert self.redis.hgetall(key) == {}

          assert len(caplog.record_tuples) == 3

          assert_logs_exist({
@@ -218,3 +236,13 @@

              "Batch copr-repo limit",

              "Checking if we have to start actually",

          }, caplog)

+ 

+         bcr.commit()

+         without_status = set()

+         for key in self.redis.keys():

+             if not self.redis.hget(key, "status"):

+                 data = json.loads(self.redis.hget(key, "task"))

+                 for add_dir in data["add"]:

+                     without_status.add(add_dir)

+         assert "add_2" in without_status

+         assert len(without_status) == 3

@@ -34,7 +34,7 @@

          pass

  

      def test_redis_logger_exception(self):

-         log = get_redis_logger(self.opts, "copr_backend.test", "test")

+         log = get_redis_logger(self.opts, "copr_backend.test", "backend")

          try:

              raise BackendError("foobar")

          except Exception as err:
@@ -42,7 +42,7 @@

  

          (_, raw_message) = self.rc.blpop([LOG_REDIS_FIFO])

          data = json.loads(raw_message)

-         assert data.get("who") == "test"

+         assert data.get("who") == "backend"

          assert data.get("levelno") == logging.ERROR

          assert "error occurred: Backend process error: foobar\n" in data["msg"]

          assert 'raise BackendError("foobar")' in data["msg"]

@@ -0,0 +1,19 @@

+ """

+ index CoprChroot.deleted

+ 

+ Revision ID: 8ea94673d6ee

+ Revises: 2318cc31444e

+ Create Date: 2021-06-02 07:19:47.605140

+ """

+ 

+ from alembic import op

+ 

+ 

+ revision = '8ea94673d6ee'

+ down_revision = '2318cc31444e'

+ 

+ def upgrade():

+     op.create_index(op.f('ix_copr_chroot_deleted'), 'copr_chroot', ['deleted'], unique=False)

+ 

+ def downgrade():

+     op.drop_index(op.f('ix_copr_chroot_deleted'), table_name='copr_chroot')

@@ -0,0 +1,67 @@

+ """

+ Fix outdated chroots that were never notified.

+ """

+ 

+ import datetime

+ import click

+ from coprs import app, db

+ from coprs.logic.coprs_logic import CoprChrootsLogic

+ 

+ 

+ @click.command()

+ @click.option(

+     "--prolong-days",

+     type=int,

+     metavar="N",

+     show_default=True,

+     default=app.config["EOL_CHROOTS_NOTIFICATION_PERIOD"]*2,

+     help="The prolonged chroots will have N more days to let maintainers "

+          "know.",

+ )

+ @click.option(

+     "--delete-after-days",

+     type=int,

+     metavar="N",

+     show_default=True,

+     default=app.config["EOL_CHROOTS_NOTIFICATION_PERIOD"],

+     help="Consider chroots that don't have delete_after in too far "

+          "future, at most N days.",

+ )

+ @click.option(

+     "--dry-run/--no-dry-run",

+     default=False,

+     help="Don't change the database, and just print what would otherwise "

+          "happen."

+ )

+ def fixup_unnoticed_chroots(dry_run, delete_after_days, prolong_days):

+     """

+     Just in case some of the outdated chroots got no e-mail notification so far

+     - and the delete_after property has already passed -- give such chroot a bit

+     more time so maintainers can be notified.  E.g. see issue#1724.

+     """

+     counter = 0

+     query = CoprChrootsLogic.should_already_be_noticed(delete_after_days)

+ 

+     new_delete_after = datetime.datetime.now() \

+                      + datetime.timedelta(days=prolong_days)

+ 

+     for copr_chroot in query:

+         counter += 1

+         current_after_days = copr_chroot.delete_after_days

+         if current_after_days < 0:

+             current_after_days = 0

+ 

+         print("Prolonging {}/{} (id={}) by {} days".format(

+             copr_chroot.copr.full_name,

+             copr_chroot.name,

+             copr_chroot.id,

+             prolong_days - current_after_days))

+ 

+         if dry_run:

+             continue

+ 

+         copr_chroot.delete_after = new_delete_after

+ 

+     print("Prolonged {} chroots".format(counter))

+ 

+     db.session.commit()

@@ -87,6 +87,10 @@

  DIST_GIT_URL = "http://copr-dist-git-dev.fedorainfracloud.org/cgit"

  COPR_DIST_GIT_LOGS_URL = "http://copr-dist-git-dev.fedorainfracloud.org/per-task-logs"

  

+ # The web-UI page layout shows a "storage statistics" link in footer when

+ # this is configured (!= None).

+ BACKEND_STATS_URI = "/stats/index.html"

+ 

  # primary

  LOG_FILENAME = "/var/log/copr-frontend/frontend.log"

  LOG_DIR = "/var/log/copr-frontend/"

@@ -11,6 +11,7 @@

      SECRET_KEY = "THISISNOTASECRETATALL"

      BACKEND_PASSWORD = "thisisbackend"

      BACKEND_BASE_URL = "http://copr-be-dev.cloud.fedoraproject.org"

+     BACKEND_STATS_URI = None

  

      KRB5_LOGIN_BASEURI = "/krb5_login/"

      KRB5_LOGIN = {}

@@ -201,7 +201,7 @@

              build_ids.append(build.id)

  

              # inherit some params from the first build

-             for param in ['ownername', 'projectname']:

+             for param in ["ownername", "projectname", "appstream"]:

                  new = build_delete_data[param]

                  if param in data and data[param] != new:

                      # this shouldn't happen

@@ -857,8 +857,8 @@

              storage_path = app.config["STORAGE_DIR"]

              try:

                  shutil.rmtree(os.path.join(storage_path, tmp))

-             except:

-                 pass

+             except OSError:

+                 log.exception("Can't remove tmpdir '%s'", tmp)

  

  

      @classmethod
@@ -947,6 +947,7 @@

  

              if new_status == StatusEnum("failed"):

                  build.fail_type = FailTypeEnum("srpm_build_error")

+                 BuildsLogic.delete_local_source(build)

  

              cls.process_update_callback(build)

              db.session.add(build)
@@ -964,7 +965,6 @@

                      if upd_dict.get("status") in BuildsLogic.terminal_states:

                          build_chroot.ended_on = upd_dict.get("ended_on") or time.time()

                          assert isinstance(upd_dict, dict)

-                         assert not isinstance(upd_dict, str)

                          BuildChrootResultsLogic.create_from_dict(

                              build_chroot, upd_dict.get("results"))

  
@@ -1392,6 +1392,8 @@

  

          and records all of the built packages for a given `BuildChroot`.

          """

+         if results is None or "packages" not in results:

+             return []

          return [cls.create(build_chroot, **result)

                  for result in results["packages"]]

  

@@ -1053,6 +1053,29 @@

                       # Filter only inactive (i.e. EOL) chroots

                       .filter(not_(models.MockChroot.is_active)))

  

+ 

+     @classmethod

+     def should_already_be_noticed(cls, remaining_days):

+         """

+         In issue#1724 we realized that we did not notify some chroots.  This

+         method is here temporarily to fix the situation.  We give such chroots

+         a bit more time so there's a chance we'll notify the maintainers.

+         """

+         exp_delete_after = datetime.datetime.now() \

+                          + datetime.timedelta(days=remaining_days)

+ 

+         query = cls.get_multiple()

+         return (

+             query.filter(models.CoprChroot.delete_after

+                          < exp_delete_after)

+              # Filter-out manually deleted chroots.

+              .filter(models.CoprChroot.deleted.isnot(True))

+              # We want not-yet notified chroots.

+              .filter(models.CoprChroot.delete_notify.is_(None))

+              # Filter only inactive (i.e. EOL) chroots

+              .filter(not_(models.MockChroot.is_active))

+         )

+ 

      @classmethod

      def filter_to_be_deleted(cls, query):

          """

@@ -1471,6 +1471,7 @@

          """

          return {bc.name: bc.results_dict for bc in self.build_chroots}

  

+     @property

      def appstream(self):

          """Whether appstream metadata should be generated for a build."""

          return self.copr.appstream
@@ -1620,7 +1621,7 @@

      bootstrap_image = db.Column(db.Text)

  

      isolation = db.Column(db.Text, default="unchanged")

-     deleted = db.Column(db.Boolean, default=False)

+     deleted = db.Column(db.Boolean, default=False, index=True)

  

      def update_comps(self, comps_xml):

          if isinstance(comps_xml, str):
@@ -2074,8 +2075,8 @@

          return "Action {0} on {1}, old value: {2}, new value: {3}.".format(

              self.action_type, self.object_type, self.old_value, self.new_value)

  

-     def to_dict(self, **kwargs):

-         d = super(Action, self).to_dict()

+     def to_dict(self, options=None, **kwargs):

+         d = super(Action, self).to_dict(options)

          if d.get("object_type") == "module":

              module = Module.query.filter(Module.id == d["object_id"]).first()

              data = json.loads(d["data"])

@@ -116,6 +116,9 @@

                        <li> <a href="{{ url_for('user_ns.user_info') }}">GDPR</a> </li>

                        <li> <a href="{{ url_for('rss_ns.rss') }}">RSS</a> </li>

                        <li> <a href="/db_dumps/">Database Dump</a> </li>

+ {% if config.BACKEND_STATS_URI %}

+                       <li><a href="{{ config.BACKEND_BASE_URL }}{{ config.BACKEND_STATS_URI }}">Storage statistics</a> </li>

+ {% endif %}

                      </ul>

                    </dd>

                  </dl>

@@ -36,6 +36,7 @@

  import commands.clean_expired_projects

  import commands.clean_old_builds

  import commands.delete_orphans

+ import commands.fixup_unnoticed_chroots

  

  from coprs import app

  
@@ -61,6 +62,7 @@

      "drop_chroot",

      "branch_fedora",

      "comment_chroot",

+     "fixup_unnoticed_chroots",

  

      # User commands

      "alter_user",

@@ -408,7 +408,7 @@

                               f_mock_chroots, f_builds,f_users_api, ):

  

          self.db.session.commit()

-         self.b1.appstream = True

+         self.b1.copr.appstream = True

          b_id = self.b1.id

          href = "/api_2/builds/{}".format(b_id)

          r = self.request_rest_api_with_auth(

@@ -1,6 +1,7 @@

  # -*- encoding: utf-8 -*-

  

  import json

+ import os

  import time

  

  import pytest
@@ -229,7 +230,7 @@

              expected_chroots_to_delete.add(bchroot.name)

  

          assert len(ActionsLogic.get_many().all()) == 0

-         self.b4.appstream = True

+         self.b4.copr.appstream = True

          BuildsLogic.delete_build(self.u1, self.b4)

          self.db.session.commit()

  
@@ -251,7 +252,7 @@

          self.db.session.commit()

  

          assert len(ActionsLogic.get_many().all()) == 0

-         self.b1.appstream = True

+         self.b1.copr.appstream = True

          BuildsLogic.delete_build(self.u1, self.b1)

          self.db.session.commit()

  
@@ -295,6 +296,7 @@

  

          # doesn't contain 'fedora-18-x86_64': ['bar']!

          assert delete_data == {

+             'appstream': True,

              'ownername': 'user1',

              'projectname': 'foocopr',

              'project_dirnames': {
@@ -326,7 +328,7 @@

              expected_chroots_to_delete.add(bchroot.name)

  

          assert len(ActionsLogic.get_many().all()) == 0

-         self.b1.appstream = True

+         self.b1.copr.appstream = True

          BuildsLogic.delete_build(self.u1, self.b1)

          self.db.session.commit()

  
@@ -402,7 +404,7 @@

          # we can not delete not-yet finished builds!

          assert len(self.db.session.query(models.Build).all()) == 4

  

-         self.b3.appstream = True

+         self.b3.copr.appstream = True

          for bch in self.b3.build_chroots:

              bch.status = StatusEnum('succeeded')

              self.db.session.add(bch)
@@ -519,6 +521,71 @@

          assert build.source_status == StatusEnum("importing")

          assert build.package.name == "foo"

  

+     @TransactionDecorator("u1")

+     @pytest.mark.usefixtures("f_users", "f_users_api", "f_mock_chroots", "f_db")

+     @pytest.mark.parametrize("fail", [False, True])

+     def test_temporary_data_removed(self, fail):

+         self.web_ui.new_project("test", ["fedora-rawhide-i386"])

+         content = "content"

+         filename = "fake.src.rpm"

+         def f_uploader(file_path):

+             assert file_path.endswith(filename)

+             with open(file_path, "w") as fd:

+                 fd.write(content)

+ 

+         user = models.User.query.get(1)

+         copr = models.Copr.query.first()

+         build = BuildsLogic.create_new_from_upload(

+             user, copr, f_uploader, os.path.basename(filename),

+             chroot_names=["fedora-18-x86_64"],

+             copr_dirname=None)

+ 

+         source_dict = build.source_json_dict

+         storage = os.path.join(self.app.config["STORAGE_DIR"], source_dict["tmp"])

+         with open(os.path.join(storage, filename), "r") as fd:

+             assert fd.readlines() == [content]

+         self.db.session.commit()

+         assert os.path.exists(storage)

+ 

+         form_data = {

+             "builds": [{

+                 "id": 1,

+                 "task_id": "1",

+                 "srpm_url": "http://foo",

+                 "status": 0 if fail else 1,

+                 "pkg_name": "foo",  # not a 'copr-cli'!

+                 "pkg_version": 1

+             }],

+         }

+ 

+         self.backend.update(form_data)

+         build = models.Build.query.get(1)

+         assert build.source_state == ("failed" if fail else "importing")

+ 

+         # Removed upon failure, otherwise exists!

+         assert os.path.exists(storage) is not fail

+ 

+         if fail:

+             # nothing is imported in this case, nothing to test

+             return

+ 

+         # test that import hook works

+         r = self.tc.post("/backend/import-completed/",

+                          content_type="application/json",

+                          headers=self.auth_header,

+                          data=json.dumps({

+                             "build_id": 1,

+                             "branch_commits": {

+                                 "master": "4dc32823233c0ef1aacc6f345b674d4f40a026b8"

+                             },

+                             "reponame": "test/foo"

+                         }))

+         assert r.status_code == 200

+         build = models.Build.query.get(1)

+         assert build.source_state == "succeeded"

+         assert not os.path.exists(storage)

+ 

+ 

      @pytest.mark.usefixtures("f_users", "f_coprs", "f_mock_chroots", "f_builds", "f_db")

      def test_build_results_filter(self):

          """

@@ -147,7 +147,7 @@

          self.db.session.commit()

  

          expected_chroot_builddirs = {'srpm-builds': [self.b_few_chroots.result_dir]}

-         self.b_few_chroots.appstream = True

+         self.b_few_chroots.copr.appstream = True

          for chroot in self.b_few_chroots.build_chroots:

              expected_chroot_builddirs[chroot.name] = [chroot.result_dir]

  
@@ -177,7 +177,7 @@

          self.db.session.commit()

  

          b_id = self.b1.id

-         self.b1.appstream = True

+         self.b1.copr.appstream = True

          url = "/coprs/{0}/{1}/delete_build/{2}/".format(self.u1.name, self.c1.name, b_id)

  

          r = self.test_client.post(

file modified
+1 -1
@@ -20,7 +20,7 @@

  %{expand: %%global latest_requires_packages %1 %%{?latest_requires_packages}}

  

  Name:    copr-rpmbuild

- Version: 0.51

+ Version: 0.51.1.dev

  Summary: Run COPR build tasks

  Release: 1%{?dist}

  URL: https://pagure.io/copr/copr

no initial comment

Build succeeded.

Pull-Request has been closed by praiskup (2 years ago)

Pull-Request has been reopened by praiskup (2 years ago)

rebased onto 341f540 (2 years ago)

Build succeeded.

Pull-Request has been closed by praiskup (2 years ago)
Metadata
Changes Summary (40 files)

+1 -0    file changed    .gitignore
+1 -1    file changed    .pylintpath/pylint_copr_plugin.py
+3 -0    file changed    backend/conf/copr-be.conf.example
+0 -0    file renamed    backend/conf/crontab/copr-backend -> backend/conf/crontab/daily
+3       file added      backend/conf/crontab/weekly
+6 -1    file changed    backend/copr-backend.spec
+46      file added      backend/copr_backend/app.py
+4 -3    file changed    backend/copr_backend/background_worker_build.py
+1 -1    file changed    backend/copr_backend/createrepo.py
+1 -4    file changed    backend/copr_backend/daemons/log.py
+29 -3   file changed    backend/copr_backend/helpers.py
+3 -0    file changed    backend/copr_backend/job.py
+14      file added      backend/copr_backend/setup.py
+0       file added      backend/copr_backend/stats_templates/__init__.py
+94      file added      backend/copr_backend/stats_templates/arches.html.j2
+76      file added      backend/copr_backend/stats_templates/chroots.html.j2
+91      file added      backend/copr_backend/stats_templates/distro.html.j2
+27      file added      backend/copr_backend/stats_templates/index.html.j2
+92      file added      backend/copr_backend/stats_templates/topmost.html.j2
+34      file added      backend/localwrap
+267     file added      backend/run/copr-backend-analyze-results
+313     file added      backend/run/copr-backend-generate-graphs
+2 -1    file changed    backend/run_tests.sh
+1 -0    file changed    backend/setup.py
+39 -11  file changed    backend/tests/test_createrepo.py
+2 -2    file changed    backend/tests/test_helpers.py
+19      file added      frontend/coprs_frontend/alembic/versions/8ea94673d6ee_index_coprchroot_deleted.py
+67      file added      frontend/coprs_frontend/commands/fixup_unnoticed_chroots.py
+4 -0    file changed    frontend/coprs_frontend/config/copr.conf
+1 -0    file changed    frontend/coprs_frontend/coprs/config.py
+1 -1    file changed    frontend/coprs_frontend/coprs/logic/actions_logic.py
+5 -3    file changed    frontend/coprs_frontend/coprs/logic/builds_logic.py
+23 -0   file changed    frontend/coprs_frontend/coprs/logic/coprs_logic.py
+4 -3    file changed    frontend/coprs_frontend/coprs/models.py
+3 -0    file changed    frontend/coprs_frontend/coprs/templates/layout.html
+2 -0    file changed    frontend/coprs_frontend/manage.py
+1 -1    file changed    frontend/coprs_frontend/tests/test_api/test_build_r.py
+71 -4   file changed    frontend/coprs_frontend/tests/test_logic/test_builds_logic.py
+2 -2    file changed    frontend/coprs_frontend/tests/test_views/test_coprs_ns/test_coprs_builds.py
+1 -1    file changed    rpmbuild/copr-rpmbuild.spec