#296 new features for logview
Merged 3 years ago by kevin. Opened 3 years ago by darknao.
fedora-infra/ darknao/ansible feature/logview  into  master

file modified
+60 -41
@@ -15,12 +15,31 @@ 

  # You should have received a copy of the GNU General Public License

  # along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

  

- 

+ # Make coding more python3-ish

+ from __future__ import (absolute_import, division, print_function)

+ __metaclass__ = type

+ 

+ DOCUMENTATION = r'''

+ callback: logdetail

+ callback_type: notification

+ short_description: Logs playbook results, per date, playbook and host.

+ description: Logs playbook results, per date, playbook and host, in I(log_path).

+ options:

+   log_path:

+     description: The path where log files will be created.

+     default: /var/log/ansible

+     ini:

+     - section: callback_logdetail

+       key: log_path

+     env:

+     - name: ANSIBLE_LOGDETAIL_PATH

+ '''

  

  import os

  import time

  import json

  import pwd

+ import gzip

  

  try:

      from ansible.utils.hashing import secure_hash
@@ -33,11 +52,10 @@ 

      # Ansible v1 compat

      CallbackBase = object

  

- TIME_FORMAT="%b %d %Y %H:%M:%S"

+ TIME_FORMAT = "%b %d %Y %H:%M:%S"

  

- MSG_FORMAT="%(now)s\t%(count)s\t%(category)s\t%(name)s\t%(data)s\n"

+ MSG_FORMAT = "%(now)s\t%(count)s\t%(category)s\t%(name)s\t%(data)s\n"

  

- LOG_PATH = '/var/log/ansible'

  

  def getlogin():

      try:
@@ -46,14 +64,15 @@ 

          user = pwd.getpwuid(os.geteuid())[0]

      return user

  

+ 

  class LogMech(object):

-     def __init__(self):

+     def __init__(self, logpath):

          self.started = time.time()

          self.pid = str(os.getpid())

          self._pb_fn = None

          self._last_task_start = None

          self.play_info = {}

-         self.logpath = LOG_PATH

+         self.logpath = logpath

          if not os.path.exists(self.logpath):

              try:

                  os.makedirs(self.logpath, mode=0o750)
@@ -78,13 +97,13 @@ 

      def logpath_play(self):

          # this is all to get our path to look nice ish

          tstamp = time.strftime('%Y/%m/%d/%H.%M.%S', time.localtime(self.started))

-         path = os.path.normpath(self.logpath + '/' + self.playbook_id +  '/' + tstamp + '/')

+         path = os.path.normpath(self.logpath + '/' + self.playbook_id + '/' + tstamp + '/')

  

          if not os.path.exists(path):

              try:

                  os.makedirs(path)

              except OSError as e:

-                 if e.errno != 17: # if it is not dir exists then raise it up

+                 if e.errno != 17:  # if it is not dir exists then raise it up

                      raise

  

          return path
@@ -119,22 +138,21 @@ 

              host = 'HOSTMISSING'

  

          if type(data) == dict:

-             name = data.get('module_name',None)

+             name = data.get('module_name', None)

          else:

              name = "unknown"

  

- 

          # we're in setup - move the invocation  info up one level

          if 'invocation' in data:

              invoc = data['invocation']

              if not name and 'module_name' in invoc:

                  name = invoc['module_name']

  

-             #don't add this since it can often contain complete passwords :(

+             # don't add this since it can often contain complete passwords :(

              del(data['invocation'])

  

          if task:

-             name = task.name

+             name = task._name

              data['task_start'] = self._last_task_start

              data['task_end'] = time.time()

              data.update(self.task_to_json(task))
@@ -147,7 +165,7 @@ 

  

          if self.play_info.get('check', False) and self.play_info.get('diff', False):

              category = 'CHECK_DIFF:' + category

-         elif self.play_info.get('check', False):    

+         elif self.play_info.get('check', False):

              category = 'CHECK:' + category

  

          # Sometimes this is None.. othertimes it's fine.  Othertimes it has
@@ -156,14 +174,12 @@ 

              name = name.strip()

  

          sanitize_host = host.replace(' ', '_').replace('>', '-')

-         fd = open(self.logpath_play + '/' + sanitize_host + '.log', 'a')

+         fd = gzip.open(self.logpath_play + '/' + sanitize_host + '.log.gz', 'at')

          now = time.strftime(TIME_FORMAT, time.localtime())

          fd.write(MSG_FORMAT % dict(now=now, name=name, count=count, category=category, data=json.dumps(data)))

          fd.close()

  

  

- logmech = LogMech()

- 

  class CallbackModule(CallbackBase):

      """

      logs playbook results, per host, in /var/log/ansible/hosts
@@ -180,40 +196,44 @@ 

          self.playbook = None

  

          super(CallbackModule, self).__init__()

+         self.set_options()

+         self.logmech = LogMech(self.get_option('log_path'))

  

      def set_play_context(self, play_context):

          self.play_context = play_context

  

      def v2_runner_on_failed(self, result, ignore_errors=False):

          category = 'FAILED'

-         logmech.log(result._host.get_name(), category, result._result, self.task, self._task_count)

+         self.logmech.log(result._host.get_name(), category, result._result, self.task, self._task_count)

  

      def v2_runner_on_ok(self, result):

          category = 'OK'

-         logmech.log(result._host.get_name(), category, result._result, self.task, self._task_count)

+         self.logmech.log(result._host.get_name(), category, result._result, self.task, self._task_count)

  

      def v2_runner_on_skipped(self, result):

          category = 'SKIPPED'

          res = {}

-         res['item'] = self._get_item(getattr(result._result, 'results', {}))

-         logmech.log(result._host.get_name(), category, res, self.task, self._task_count)

+         res['item'] = self._get_item_label(getattr(result._result, 'results', {}))

+         self.logmech.log(result._host.get_name(), category, res, self.task, self._task_count)

  

      def v2_runner_on_unreachable(self, result):

          category = 'UNREACHABLE'

          res = {}

          res['output'] = result._result

-         logmech.log(result._host.get_name(), category, res, self.task, self._task_count)

+         self.logmech.log(result._host.get_name(), category, res, self.task, self._task_count)

  

      def v2_runner_on_async_failed(self, result):

          category = 'ASYNC_FAILED'

-         logmech.log(result._host.get_name(), category, result._result, self.task, self._task_count)

+         self.logmech.log(result._host.get_name(), category, result._result, self.task, self._task_count)

  

      def v2_playbook_on_start(self, playbook):

          self.playbook = playbook

  

      def v2_playbook_on_task_start(self, task, is_conditional):

          self.task = task

-         logmech._last_task_start = time.time()

+         if self.task:

+             self.task._name = task.get_name().strip()

+         self.logmech._last_task_start = time.time()

          self._task_count += 1

  

      def v2_playbook_on_setup(self):
@@ -222,12 +242,12 @@ 

      def v2_playbook_on_import_for_host(self, result, imported_file):

          res = {}

          res['imported_file'] = imported_file

-         logmech.log(result._host.get_name(), 'IMPORTED', res, self.task)

+         self.logmech.log(result._host.get_name(), 'IMPORTED', res, self.task)

  

      def v2_playbook_on_not_import_for_host(self, result, missing_file):

          res = {}

          res['missing_file'] = missing_file

-         logmech.log(result._host.get_name(), 'NOTIMPORTED', res, self.task)

+         self.logmech.log(result._host.get_name(), 'NOTIMPORTED', res, self.task)

  

      def v2_playbook_on_play_start(self, play):

          self._task_count = 0
@@ -237,7 +257,7 @@ 

              path = os.path.abspath(self.playbook._file_name)

  

              # tel the logger what the playbook is

-             logmech.playbook_id = path

+             self.logmech.playbook_id = path

  

              # if play count == 0

              # write out playbook info now
@@ -249,33 +269,32 @@ 

                  pb_info['extra_vars'] = play._variable_manager.extra_vars

                  pb_info['inventory'] = play._variable_manager._inventory._sources

                  pb_info['playbook_checksum'] = secure_hash(path)

-                 pb_info['check'] = self.play_context.check_mode

-                 pb_info['diff'] = self.play_context.diff

-                 logmech.play_log(json.dumps(pb_info, indent=4))

+                 if hasattr(self, "play_context"):

+                     pb_info['check'] = self.play_context.check_mode

+                     pb_info['diff'] = self.play_context.diff

+                 self.logmech.play_log(json.dumps(pb_info, indent=4))

  

              self._play_count += 1

              # then write per-play info that doesn't duplcate the playbook info

              info = {}

              info['play'] = play.name

              info['hosts'] = play.hosts

-             info['transport'] = str(self.play_context.connection)

              info['number'] = self._play_count

-             info['check'] = self.play_context.check_mode

-             info['diff'] = self.play_context.diff

-             logmech.play_info = info

+             if hasattr(self, "play_context"):

+                 info['transport'] = str(self.play_context.connection)

+                 info['check'] = self.play_context.check_mode

+                 info['diff'] = self.play_context.diff

+             self.logmech.play_info = info

              try:

-                 logmech.play_log(json.dumps(info, indent=4))

+                 self.logmech.play_log(json.dumps(info, indent=4))

              except TypeError:

                  print(("Failed to conver to JSON:", info))

  

- 

      def v2_playbook_on_stats(self, stats):

          results = {}

          for host in list(stats.processed.keys()):

              results[host] = stats.summarize(host)

-             logmech.log(host, 'STATS', results[host])

-         logmech.play_log(json.dumps({'stats': results}, indent=4))

-         logmech.play_log(json.dumps({'playbook_end': time.time()}, indent=4))

-         print(('logs written to: %s' % logmech.logpath_play))

- 

- 

+             self.logmech.log(host, 'STATS', results[host])

+         self.logmech.play_log(json.dumps({'stats': results}, indent=4))

+         self.logmech.play_log(json.dumps({'playbook_end': time.time()}, indent=4))

+         print(('logs written to: %s' % self.logmech.logpath_play))

file modified
+258 -49
@@ -1,23 +1,126 @@ 

- #!/usr/bin/python3

+ #!/usr/bin/python

+ # -*- coding: utf-8 -*-

+ # vim: et ts=4 ai sw=4 sts=0

  import sys

  import json

- from optparse import OptionParser

+ from argparse import ArgumentParser

  import os

+ import re

  import glob

- from datetime import date, timedelta

+ import gzip

+ from datetime import datetime, timedelta

  import dateutil.parser as dateparser

+ try:

+     # Python3

+     import configparser

+ except ImportError:

+     # Python2

+     import ConfigParser as configparser

+ from ansible.config.manager import find_ini_config_file

+ from ansible.utils.color import stringc

+ from ansible import constants as C

+ from collections import Counter

  

- logpath = '/var/log/ansible'

- search_terms = ['CHANGED', 'FAILED']

+ if not hasattr(sys.stdout, 'isatty') or not sys.stdout.isatty():

+     HAS_COLOR = False

+ else:

+     HAS_COLOR = True

+ 

+ DEFAULT_LOGPATH = '/var/log/ansible'

+ default_search_terms = ['CHANGED', 'FAILED']

  date_terms = {

-   "today": date.today,

-   "yesterday": lambda: date.today() - timedelta(1),

+   "today": lambda: datetime.today().replace(

+       hour=0, minute=0, second=0, microsecond=0),

+   "yesterday": lambda: datetime.today().replace(

+       hour=0, minute=0, second=0, microsecond=0) - timedelta(1),

  }

  

  

+ def colorByCat(category, txt=None):

+     if not txt:

+         txt = category

+     if 'OK' in category:

+         color_out = stringc(txt, C.COLOR_OK)

+     elif "FAILED" in category:

+         color_out = stringc(txt, C.COLOR_ERROR)

+     elif "CHANGED" in category:

+         color_out = stringc(txt, C.COLOR_CHANGED)

+     elif "SKIPPED" in category:

+         color_out = stringc(txt, C.COLOR_SKIP)

+     elif "UNREACHABLE" in category:

+         color_out = stringc(txt, C.COLOR_UNREACHABLE)

+     else:

+         # This hack make sure the text width is the same as any other colored text

+         color_out = u'\x1b[0;00m%s\x1b[0m' % (txt,)

+     if not HAS_COLOR:

+         color_out = txt

+     return color_out

+ 

+ 

+ def colorByStats(txt, stats):

+     if stats['failures'] != 0:

+         return stringc(txt, C.COLOR_ERROR)

+     elif stats['unreachable'] != 0:

+         return stringc(txt, C.COLOR_UNREACHABLE)

+     elif stats['changed'] != 0:

+         return stringc(txt, C.COLOR_CHANGED)

+     else:

+         return stringc(txt, C.COLOR_OK)

+ 

+ 

+ def colorByCount(txt, count, color):

+     s = "%s%s" % (txt, count)

+     if count > 0 and HAS_COLOR:

+         s = stringc(s, color)

+     return s

+ 

+ 

+ def parse_info(infofile):

+     data = {}

+     with open(infofile) as f:

+         content = f.read()

+     obj_list = [x+'}' for x in content.split('\n}')]

+     plays = []

+     for obj in obj_list[:-1]:

+         js = json.loads(obj)

+         if 'play' in js:

+             plays.append(js)

+         else:

+             data.update(json.loads(obj))

+     data['plays'] = plays

+     return data

+ 

+ 

+ def format_stats(stats):

+     return "%s %s %s %s" % (

+         colorByCount("ok:", stats['ok'], C.COLOR_OK),

+         colorByCount("chg:", stats['changed'], C.COLOR_CHANGED),

+         colorByCount("unr:", stats['unreachable'], C.COLOR_UNREACHABLE),

+         colorByCount("fail:", stats['failures'], C.COLOR_ERROR))

+ 

+ 

+ def col_width(rows):

+     widths = []

+     for col in zip(*(rows)):

+         col_width = max(map(len, col))

+         widths.append(col_width)

+     widths[-1] = 0  # don't pad last column

+     return widths

+ 

+ 

  def date_cheat(datestr):

      dc = date_terms.get(datestr, lambda: dateparser.parse(datestr))

-     return dc().strftime("%Y/%m/%d")

+     return dc()

+ 

+ 

+ def date_from_path(path):

+     date_comp = re.search(r'/(\d{4})/(\d{2})/(\d{2})', path)

+     return datetime(*map(int, date_comp.groups()))

+ 

+ 

+ def datetime_from_path(path):

+     date_comp = re.search(r'/(\d{4})/(\d{2})/(\d{2})/(\d{2})\.(\d{2})\.(\d{2})', path)

+     return datetime(*map(int, date_comp.groups()))

  

  

  def parse_args(args):
@@ -33,29 +136,49 @@ 

  

            logview -s ANY -d yesterday -p mirrorlist # list all events from the mirrorlist playbook

  

- 

            """

-     parser = OptionParser(usage=usage)

-     parser.add_option("-d", default='today', dest='datestr', help="time string of when you want logs")

-     parser.add_option("-p", default='*', dest='playbook', help="the playbook you want to look for")

-     parser.add_option("-v", default=False, dest='verbose', action='store_true', help='Verbose')

-     parser.add_option("-s", default=[], dest='search_terms', action='append', help="status to search for")

-     parser.add_option("-l", default=False, dest="list_pb", action='store_true', help="list playbooks for a specific date")

-     parser.add_option("--profile", default=False, dest="profile", action='store_true', help="output timing input per task")

-     (opts, args) = parser.parse_args(args)

+     parser = ArgumentParser(usage=usage)

+     date_group = parser.add_mutually_exclusive_group()

+     date_group.add_argument("-d", default='today', dest='datestr', help="display logs from specified date")

+     date_group.add_argument("--since",  dest="since", help="display logs since specified date")

+     date_group.add_argument("--all", default=False, dest="list_all", action='store_true', help="display all logs")

+     parser.add_argument("-p", default='*', dest='playbook', help="the playbook you want to look for")

+     parser.add_argument("-H", default=[], dest='hostname', action='append', help="Limit to the specified hostname")

+     parser.add_argument("-m", default=False, dest='message', action='store_true', help='Show tasks output')

+     parser.add_argument("-v", default=False, dest='verbose', action='store_true', help='Verbose')

+     parser.add_argument("-s", default=[], dest='search_terms', action='append', help="status to search for")

+     parser.add_argument("-l", default=False, dest="list_pb", action='store_true', help="list playbook runs")

+     parser.add_argument("--profile", default=False, dest="profile", action='store_true', help="output timing input per task")

+     opts = parser.parse_args(args)

  

      opts.datestr = date_cheat(opts.datestr)

      if not opts.search_terms:

-         opts.search_terms = search_terms

-     return opts, args

+         opts.search_terms = default_search_terms

+     if opts.since:

+         opts.since = date_cheat(opts.since)

+     opts.search_terms = list(map(str.upper, opts.search_terms))

+     return opts

  

  

  def search_logs(opts, logfiles):

+     rows = [("Play Date", colorByCat("Hostname"), "Task Time", "Id", colorByCat("State"), "Task Name", "")]

+     # rows = []

      msg = ''

      for fn in sorted(logfiles):

-         hostname = os.path.basename(fn).replace('.log', '')

-         timestamp = os.path.basename(os.path.dirname(fn))

-         for line in open(fn):

+         hostname = os.path.basename(fn).replace('.log', '').replace('.gz', '')

+         timestamp = datetime_from_path(fn).strftime("%a %b %d %Y %H:%M:%S")

+ 

+         if opts.hostname and hostname not in opts.hostname:

+             continue

+ 

+         try:

+             with gzip.open(fn) as f:

+                 f.read()

+             open_f = gzip.open(fn, "rt")

+         except IOError:

+             open_f = open(fn)

+ 

+         for line in open_f:

              things = line.split('\t')

              if len(things) < 5:

                  msg += "(logview error - unhandled line): %r\n" % line
@@ -66,51 +189,137 @@ 

              task_ts, count, category, name, data = things

  

              if category in opts.search_terms or 'ANY' in opts.search_terms:

+                 dur = None

+                 last_col = ""

                  slurp = json.loads(data)

                  if opts.profile:

                      st = slurp.get('task_start', 0)

                      end = slurp.get('task_end', 0)

                      if st and end:

-                         dur = '%.2f' % (float(end) - float(st))

-                     else:

-                         dur = None

+                         dur = '%.2fs' % (float(end) - float(st))

+ 

+                 state = colorByCat(category)

+                 c_hostname = colorByCat(category, hostname)

+ 

+                 if "STATS" in category:

+                     if type(slurp) == dict:

+                         name = format_stats(slurp)

+                         c_hostname = colorByStats(hostname, slurp)

+                         state = colorByStats(category, slurp)

  

-                 msg += '%s\t%s\t%s\t%s\t%s\t%s' % (

-                     timestamp, hostname, task_ts, count, category, name)

+                 result = [timestamp, c_hostname, task_ts, count, state]

+ 

+                 if not name:

+                     name = slurp.get("task_module")

+                 try:

+                     name = name.decode('utf8')

+                 except AttributeError:

+                     pass

+                 result.append(name)

+ 

+                 if dur:

+                     last_col += "%s " % (dur,)

  

                  if not opts.verbose:

                      if type(slurp) == dict:

-                         for term in ['task_userid', 'cmd']:

+                         for term in ['cmd', ]:

                              if term in slurp:

-                                 msg += '\t%s:%s' % (term, slurp.get(term, None))

-                     if opts.profile and dur:

-                         msg += '\t%s:%s' % ('dur', dur)

+                                 last_col += '\t%s:%s' % (term, slurp.get(term, None))

  

-                     msg += '\n'

+                     if opts.message:

+                         for term in ['msg', 'stdout']:

+                             if term in slurp:

+                                 value = slurp.get(term, None)

+                                 if type(value) is list:

+                                     value = "\n".join(value)

+                                 if value:

+                                     last_col += '\n%s: %s\n' % (term, colorByCat(category, value.strip()))

                  else:

-                     if opts.profile and dur:

-                         msg += '\t%s:%s' % ('dur', dur)

-                     msg += '\n'

-                     msg += json.dumps(slurp, indent=4)

-                     msg += '\n'

+                     last_col += '\n'

+                     last_col += json.dumps(slurp, indent=4)

+                     last_col += '\n'

+ 

+                 result.append(last_col)

+                 rows.append(result)

  

-     return msg

+     return rows

  

  

  def main(args):

-     opts, args = parse_args(args)

-     for pb in glob.glob(os.path.join(logpath, opts.playbook)):

-         pb_name = os.path.basename(pb)

-         for pb_logdir in glob.glob(os.path.join(pb, opts.datestr)):

-             if opts.list_pb:

-                 print(pb_name)

+     cfg = find_ini_config_file()

+     if cfg:

+         cp = configparser.ConfigParser()

+         cp.read(cfg)

+         try:

+             logpath = cp.get('callback_logdetail', "log_path")

+         except configparser.NoSectionError:

+             logpath = DEFAULT_LOGPATH

+     opts = parse_args(args)

+     rows = []

+ 

+     # List play summary

+     if opts.list_pb:

+         rows.append(["Date", colorByCat("", "Playbook"), "Ran By", "Hosts", "Stats"])

+         for r, d, f in os.walk(logpath):

+             if opts.since and f and date_from_path(r) < opts.since:

                  continue

+             for file in f:

+                 if file.endswith('.info'):

+                     pb = parse_info(os.path.join(r, file))

+                     pb_name = os.path.splitext(os.path.basename(pb['playbook']))[0]

+                     pb_date = datetime_from_path(r)

+                     if (

+                             opts.list_all or opts.since

+                             or (

+                                 opts.datestr != opts.datestr.replace(hour=0, minute=0, second=0, microsecond=0)

+                                 and opts.datestr == pb_date)

+                             or (

+                                 opts.datestr == opts.datestr.replace(hour=0, minute=0, second=0, microsecond=0)

+                                 and opts.datestr.date() == pb_date.date())):

+                         stats = Counter()

+                         hosts = []

+                         if "stats" in pb:

+                             for host, stat in pb['stats'].items():

+                                 del stat['task_userid']

+                                 stats += Counter(stat)

+                                 hosts.append(host)

+                         host_count = len(set(hosts))

+                         pb_name = colorByStats(pb_name, stats)

+                         summary = format_stats(stats)

+                         # summary = "ok:%s chd:%s unr:%s faild:%s" % (stats['ok'], stats['changed'], stats['unreachable'], stats['failures'])

+ 

+                         rows.append([pb_date.isoformat(), pb_name, pb['userid'], str(host_count), summary])

  

-             logfiles = glob.glob(pb_logdir + '/*/*.log')

-             msg = search_logs(opts, logfiles)

-             if msg:

-                 print(pb_name)

-                 print(msg)

+         m_widths = col_width(rows)

+         if len(rows) <= 1:

+             print("no log")

+         else:

+             for row in rows:

+                 print("  ".join((val.ljust(width) for val, width in zip(row, m_widths))).strip())

+ 

+     # Play detail

+     else:

+         for pb in glob.glob(os.path.join(logpath, opts.playbook)):

+             pb_name = os.path.basename(pb)

+             if opts.list_all or opts.since:

+                 date_glob = glob.glob(os.path.join(pb, "*/*/*"))

+             else:

+                 date_glob = glob.glob(os.path.join(pb, opts.datestr.strftime("%Y/%m/%d")))

+             for pb_logdir in date_glob:

+                 run_date = date_from_path(pb_logdir)

+                 if opts.since and run_date < opts.since:

+                     continue

+                 if opts.datestr != opts.datestr.replace(hour=0, minute=0, second=0, microsecond=0):

+                     logfiles = glob.glob(pb_logdir + '/' + opts.datestr.strftime("%H.%M.%S") + '/*.log*')

+                 else:

+                     logfiles = glob.glob(pb_logdir + '/*/*.log*')

+                 rows = search_logs(opts, logfiles)

+                 if len(rows) > 1:

+                     m_widths = col_width(rows)

+                     print("%s\n-------" % (pb_name,))

+                     for row in rows:

+                         print("  ".join((val.ljust(width) for val, width in zip(row, m_widths))))

+                     print("")

  

  

  if __name__ == "__main__":

I end up using logview quite a lot myself and added a few features I needed you may be interested in.
All of them are open to discussion, and feel free to close this PR if you think it's not relevant for your usage.

Here they are:
Log files:

  • Added an option to specify the log path, which can be set in your ansible.cfg (defaults to /var/log/ansible)
  • Added gzip compression
    My Ansible control node is heavily space constrained at $dayjob, so I needed to keep all logs at a minimum, in my own home directory. I've kept logview backward compatible with old plain-text log files, so you can still query previous logs if needed.

Cosmetic changes:

  • Colors ! Same chart as ansible (green, yellow, red for ok, changed, failed). Only enabled when stdout is a tty.
  • Nicely formatted columns display
    insert image to showcase color madness here
  • The list playbook option ( -l ) shows detailed info about playbook runs, like this:
$ ./logview -l
Date                 Playbook    Ran By  Hosts  Stats
2020-10-28T10:23:05  update_all  nao     15     ok:17 chg:8 unr:6 fail:0
  • Included the play recap in the 'stats' status (same as the play recap returned by Ansible):
$ ./logview -s stats -d 2020-10-28T10:23:05
update_all
-------
Play Date                 Hostname          Task Time             Id  State  Task Name                                      
Wed Oct 28 2020 10:23:05  external.at.home  Oct 28 2020 10:32:48  0   STATS  ok:0 chg:0 unr:1 fail:0             
Wed Oct 28 2020 10:23:05  fedo32.home       Oct 28 2020 10:32:48  0   STATS  ok:0 chg:0 unr:1 fail:0             
Wed Oct 28 2020 10:23:05  ha.at.home        Oct 28 2020 10:32:48  0   STATS  ok:2 chg:1 unr:0 fail:0  
Wed Oct 28 2020 10:23:05  haproxy.home      Oct 28 2020 10:32:48  0   STATS  ok:2 chg:1 unr:0 fail:0  
Wed Oct 28 2020 10:23:05  ipa.at.home       Oct 28 2020 10:32:48  0   STATS  ok:2 chg:1 unr:0 fail:0  
Wed Oct 28 2020 10:23:05  utility.at.home   Oct 28 2020 10:32:48  0   STATS  ok:2 chg:1 unr:0 fail:0  

By the way, the status option ( -s ) is no longer case-sensitive

New options:

  • Added date filtering options ( --all and --since ) to display all playbooks ever run, or since a specified date.
  • Host filtering ( -H )
  • Display task output, when applicable ( -m ). Shows the stdout of the command module and the msg of failed tasks, e.g.:
$ ./logview -H haproxy.home --all -m -s failed
haproxy.home
-------
Play Date                 Hostname      Task Time             Id  State   Task Name        
Tue Jul 14 2020 14:20:54  haproxy.home  Jul 14 2020 14:22:22  29  FAILED  Install HAProxy  
msg: This command has to be run under the root user.

Tue Jul 14 2020 14:30:31  haproxy.home  Jul 14 2020 14:31:07  31  FAILED  sebool           
msg: Unsupported parameters for (seboolean) module: persistant Supported parameters include: ignore_selinux_state, name, persistent, state

Tue Jul 14 2020 14:31:33  haproxy.home  Jul 14 2020 14:32:23  33  FAILED  Open Firewall    
msg: template error while templating string: expected token ':', got '}'. String: {{{ item.service | default(omit) }}
  • and probably a few more things I don't remember
  • Python 2 & 3 compatibility. My control node at $dayjob is using RHEL 7, so I need to keep Python 2.7 compatibility for now.

1 new commit added

  • logview: add header, extract play run date from path for easier filtering with -d option
3 years ago

rebased onto 0b88984eda1101cfc863774ec1d47c37374fa3fa

3 years ago

rebased onto 09d9f20

3 years ago

I actually looked at this before break, but then forgot to comment. :)

It looks good to me. Lets merge it!

Pull-Request has been merged by kevin

3 years ago