| |
@@ -9,6 +9,7 @@
|
| |
from sqlalchemy.sql import text
|
| |
from sqlalchemy import or_
|
| |
from sqlalchemy import and_
|
| |
+ from sqlalchemy import func
|
| |
from sqlalchemy.orm import joinedload
|
| |
from sqlalchemy.orm.exc import NoResultFound
|
| |
from sqlalchemy.sql import false,true
|
| |
@@ -84,14 +85,6 @@
|
| |
return list(query.all()[:4])
|
| |
|
| |
@classmethod
|
| |
- def get_tasks_by_time(cls, start, end):
|
| |
- result = models.BuildChroot.query.join(models.Build)\
|
| |
- .filter(models.BuildChroot.ended_on >= start)\
|
| |
- .filter(models.Build.submitted_on <= end)\
|
| |
- .order_by(models.Build.id.asc())
|
| |
- return result
|
| |
-
|
| |
- @classmethod
|
| |
def get_running_tasks_by_time(cls, start, end):
|
| |
result = models.BuildChroot.query\
|
| |
.filter(models.BuildChroot.ended_on > start)\
|
| |
@@ -119,6 +112,111 @@
|
| |
return data
|
| |
|
| |
@classmethod
|
| |
+ def get_chroot_histogram(cls, start, end):
|
| |
+ chroots = []
|
| |
+ chroot_query = BuildChroot.query\
|
| |
+ .filter(models.BuildChroot.started_on < end)\
|
| |
+ .filter(models.BuildChroot.ended_on > start)\
|
| |
+ .with_entities(BuildChroot.mock_chroot_id,
|
| |
+ func.count(BuildChroot.mock_chroot_id))\
|
| |
+ .group_by(BuildChroot.mock_chroot_id)\
|
| |
+ .order_by(BuildChroot.mock_chroot_id)
|
| |
+
|
| |
+ for chroot in chroot_query:
|
| |
+ chroots.append([chroot[0], chroot[1]])
|
| |
+
|
| |
+ mock_chroots = coprs_logic.MockChrootsLogic.get_multiple()
|
| |
+ for mock_chroot in mock_chroots:
|
| |
+ for l in chroots:
|
| |
+ if l[0] == mock_chroot.id:
|
| |
+ l[0] = mock_chroot.name
|
| |
+
|
| |
+ return chroots
|
| |
+
|
| |
+ @classmethod
|
| |
+ def get_tasks_histogram(cls, type, start, end, step):
|
| |
+ start = start - (start % step) # align graph interval to a multiple of step
|
| |
+ end = end - (end % step)
|
| |
+ steps = int((end - start) / step + 0.5)
|
| |
+ data = [['pending'], ['running'], ['avg running'], ['time']]
|
| |
+
|
| |
+ result = models.BuildsStatistics.query\
|
| |
+ .filter(models.BuildsStatistics.stat_type == type)\
|
| |
+ .filter(models.BuildsStatistics.time >= start)\
|
| |
+ .filter(models.BuildsStatistics.time <= end)\
|
| |
+ .order_by(models.BuildsStatistics.time)
|
| |
+
|
| |
+ for row in result:
|
| |
+ data[0].append(row.pending)
|
| |
+ data[1].append(row.running)
|
| |
+
|
| |
+ for i in range(len(data[0]) - 1, steps):
|
| |
+ step_start = start + i * step
|
| |
+ step_end = step_start + step
|
| |
+
|
| |
+ query_pending = text("""
|
| |
+ SELECT COUNT(*) as pending
|
| |
+ FROM build_chroot JOIN build on build.id = build_chroot.build_id
|
| |
+ WHERE
|
| |
+ (
|
| |
+ build_chroot.started_on > :start
|
| |
+ OR build_chroot.started_on is NULL
|
| |
+ )
|
| |
+ AND build.submitted_on < :end
|
| |
+ AND (
|
| |
+ build_chroot.ended_on > :start
|
| |
+ OR build_chroot.ended_on IS NULL
|
| |
+ )
|
| |
+ AND NOT ((build.submitted_on NOT BETWEEN :start and :end)
|
| |
+ AND (build_chroot.started_on NOT BETWEEN :start AND :end)
|
| |
+ AND (build.submitted_on >= :start OR build_chroot.started_on <= :end)
|
| |
+ )
|
| |
+ """)
|
| |
+
|
| |
+ query_running = text("""
|
| |
+ SELECT COUNT(*) as running
|
| |
+ FROM build_chroot
|
| |
+ WHERE
|
| |
+ started_on < :end
|
| |
+ AND (ended_on > :start OR ended_on IS NULL)
|
| |
+ AND (
|
| |
+ (started_on BETWEEN :start AND :end)
|
| |
+ OR (ended_on BETWEEN :start AND :end)
|
| |
+ OR (started_on < :start
|
| |
+ AND (ended_on > :end OR (ended_on IS NULL AND status = :status)))
|
| |
+ )
|
| |
+ """)
|
| |
+
|
| |
+ res_pending = db.engine.execute(query_pending, start=step_start, end=step_end)
|
| |
+ res_running = db.engine.execute(query_running, start=step_start, end=step_end,
|
| |
+ status=helpers.StatusEnum('running'))
|
| |
+
|
| |
+ pending = res_pending.first().pending
|
| |
+ running = res_running.first().running
|
| |
+ data[0].append(pending)
|
| |
+ data[1].append(running)
|
| |
+
|
| |
+ statistic = models.BuildsStatistics(
|
| |
+ time = step_start,
|
| |
+ stat_type = type,
|
| |
+ running = running,
|
| |
+ pending = pending
|
| |
+ )
|
| |
+ db.session.merge(statistic)
|
| |
+ db.session.commit()
|
| |
+
|
| |
+ running_total = 0
|
| |
+ for i in range(1, steps + 1):
|
| |
+ running_total += data[1][i]
|
| |
+
|
| |
+ data[2].extend([running_total * 1.0 / steps] * (len(data[0]) - 1))
|
| |
+
|
| |
+ for i in range(start, end, step):
|
| |
+ data[3].append(time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(i)))
|
| |
+
|
| |
+ return data
|
| |
+
|
| |
+ @classmethod
|
| |
def get_build_importing_queue(cls, background=None):
|
| |
"""
|
| |
Returns Builds which are waiting to be uploaded to dist git
|
| |
We tried to optimize the SQL query for graph data generation; however, it still took far too much time. For this reason, this PR introduces a database table that caches the already-generated data.
In addition, this PR adds an hourly cron job that generates the graph data, preventing long loading times when no one has accessed the graph page for some time, and a daily cron job that deletes old cached data no longer used in the graphs.
I have also slightly changed the mouseover labels in the graphs so that the 24h view shows only the time and the 90d view shows only the date, and fixed the data for pending builds, which was previously shown incorrectly.