Belle II Software  release-05-02-19
validation.py
1 #!/usr/bin/env python3
2 # -*- coding: utf-8 -*-
3 
4 # basf2 specific imports
5 from basf2 import statistics
6 from ROOT import PyConfig
7 PyConfig.IgnoreCommandLineOptions = True
8 import ROOT
9 
10 # Normal library imports
11 import math
12 import logging
13 import os
14 import timeit
15 import sys
16 import time
17 import shutil
18 import datetime
19 from typing import List
20 
21 import json_objects
22 import mail_log
23 
24 # A pretty printer. Prints prettier lists, dicts, etc. :)
25 import pprint
26 
27 from validationscript import Script, ScriptStatus
28 from validationfunctions import get_start_time, get_validation_folders, \
29  scripts_in_dir, parse_cmd_line_arguments, get_log_file_paths, \
30  terminal_title_line
31 import validationfunctions
32 
33 import validationserver
34 import validationplots
35 import validationscript
36 import validationpath
37 
38 # local and cluster control backends
39 import localcontrol
40 import clustercontrol
41 import clustercontrolsge
42 import clustercontroldrmaa
43 
44 
45 pp = pprint.PrettyPrinter(depth=6, indent=1, width=80)
46 
47 
48 def statistics_plots(
49  file_name='',
50  timing_methods=None,
51  memory_methds=None,
52  contact='',
53  job_desc='',
54  prefix='',
55 ):
56  """
57  Add memory usage and execution time validation plots to the given root
58  file. The current root file will be used if file_name is empty
59  (default).
60  """
61 
62  if not timing_methods:
63  timing_methods = [statistics.INIT, statistics.EVENT]
64  if not memory_methds:
65  memory_methds = [statistics.INIT, statistics.EVENT]
66 
67  # Open plot file
68  save_dir = ROOT.gDirectory
69  plot_file = None
70  if file_name:
71  plot_file = ROOT.TFile.Open(file_name, 'UPDATE')
72 
73  if not job_desc:
74  job_desc = sys.argv[1]
75 
76  # Global timing
77  method_name = {}
78  h_global_timing = ROOT.TH1D(
79  prefix + 'GlobalTiming',
80  'Global Timing',
81  5, 0, 5
82  )
83  h_global_timing.SetStats(0)
84  h_global_timing.GetXaxis().SetTitle('method')
85  h_global_timing.GetYaxis().SetTitle('time/call [ms]')
86  h_global_timing.GetListOfFunctions().Add(
87  ROOT.TNamed(
88  'Description',
89  """The (average) time of the different basf2 execution phases
90  for {}. The error bars show the rms of the time
91  distributions.""".format(job_desc)
92  )
93  )
94  h_global_timing.GetListOfFunctions().Add(
95  ROOT.TNamed(
96  'Check',
97  """There should be no significant and persistent increases in
98  the run time of the methods. Only cases where the increase
99  compared to the reference or previous versions persists for at
100  least two consecutive revisions should be reported since the
101  measurements can be influenced by load from other processes on
102  the execution host."""
103  )
104  )
105  if contact:
106  h_global_timing.GetListOfFunctions().Add(
107  ROOT.TNamed('Contact', contact)
108  )
109  for (index, method) in statistics.StatisticCounters.values.items():
110  method_name[method] = str(method)[0] \
111  + str(method).lower()[1:].replace('_r', 'R')
112  if index == 5:
113  break
114  h_global_timing.SetBinContent(
115  index + 1,
116  statistics.get_global().time_mean(method) * 1e-6
117  )
118  h_global_timing.SetBinError(
119  index + 1,
120  statistics.get_global().time_stddev(method) * 1e-6
121  )
122  h_global_timing.GetXaxis().SetBinLabel(
123  index + 1,
124  method_name[method]
125  )
126  h_global_timing.Write()
127 
128  # Timing per module for the different methods
129  modules = statistics.modules
130  h_module_timing = ROOT.TH1D(
131  prefix + 'ModuleTiming',
132  'Module Timing',
133  len(modules), 0, len(modules)
134  )
135  h_module_timing.SetStats(0)
136  h_module_timing.GetXaxis().SetTitle('module')
137  h_module_timing.GetYaxis().SetTitle('time/call [ms]')
138  h_module_timing.GetListOfFunctions().Add(
139  ROOT.TNamed(
140  'Check',
141  """There should be no significant and persistent increases in
142  the run time of a module. Only cases where the increase compared
143  to the reference or previous versions persists for at least two
144  consecutive revisions should be reported since the measurements
145  can be influenced by load from other processes on the execution
146  host."""))
147  if contact:
148  h_module_timing.GetListOfFunctions().Add(ROOT.TNamed(
149  'Contact', contact)
150  )
151  for method in timing_methods:
152  h_module_timing.SetTitle('Module %s Timing' % method_name[method])
153  h_module_timing.GetListOfFunctions().Add(
154  ROOT.TNamed(
155  'Description',
156  """The (average) execution time of the %s method of modules
157  for %s. The error bars show the rms of the time
158  distributions.""" %
159  (method_name[method],
160  job_desc)))
161  index = 1
162  for modstat in modules:
163  h_module_timing.SetBinContent(
164  index, modstat.time_mean(method) * 1e-6)
165  h_module_timing.SetBinError(
166  index, modstat.time_stddev(method) * 1e-6)
167  h_module_timing.GetXaxis().SetBinLabel(
168  index, modstat.name)
169  index += 1
170  h_module_timing.Write('%s%sTiming' % (prefix, method_name[method]))
171  h_module_timing.GetListOfFunctions().RemoveLast()
172 
173  # Memory usage profile
174  memory_profile = ROOT.Belle2.PyStoreObj('VirtualMemoryProfile', 1)
175  if memory_profile:
176  memory_profile.obj().GetListOfFunctions().Add(ROOT.TNamed(
177  'Description',
178  f'The virtual memory usage vs. the event number for {job_desc}.')
179  )
180  memory_profile.obj().GetListOfFunctions().Add(ROOT.TNamed(
181  'Check',
182  """The virtual memory usage should be flat for high event
183  numbers. If it keeps rising this is an indication of a memory
184  leak.<br>There should also be no significant increases with
185  respect to the reference (or previous revisions if no reference
186  exists).""")
187  )
188  if contact:
189  memory_profile.obj().GetListOfFunctions().Add(ROOT.TNamed(
190  'Contact', contact)
191  )
192  memory_profile.obj().Write(prefix + 'VirtualMemoryProfile')
193 
194  # Rss Memory usage profile
195  memory_profile = ROOT.Belle2.PyStoreObj('RssMemoryProfile', 1)
196  if memory_profile:
197  memory_profile.obj().GetListOfFunctions().Add(ROOT.TNamed(
198  'Description',
199  f'The rss memory usage vs. the event number for {job_desc}.')
200  )
201  memory_profile.obj().GetListOfFunctions().Add(ROOT.TNamed(
202  'Check',
203  """The rss memory usage should be flat for high event numbers.
204  If it keeps rising this is an indication of a memory
205  leak.<br>There should also be no significant increases with
206  respect to the reference (or previous revisions if no reference
207  exists). In the (rare) case that memory is swapped by the OS,
208  the rss memory usage can decrease.""")
209  )
210  if contact:
211  memory_profile.obj().GetListOfFunctions().Add(ROOT.TNamed(
212  'Contact', contact)
213  )
214  memory_profile.obj().Write(prefix + 'RssMemoryProfile')
215 
216  # Memory usage per module for the different methods
217  sqrt_n = 1 / math.sqrt(statistics.get_global().calls() - 1)
218  h_module_memory = ROOT.TH1D(
219  prefix + 'ModuleMemory',
220  'Virtual Module Memory',
221  len(modules), 0, len(modules)
222  )
223  h_module_memory.SetStats(0)
224  h_module_memory.GetXaxis().SetTitle('module')
225  h_module_memory.GetYaxis().SetTitle('memory increase/call [kB]')
226  h_module_memory.GetListOfFunctions().Add(ROOT.TNamed(
227  'Description',
228  f'The (average) increase in virtual memory usage per call of the '
229  f'{method_name[method]} method of modules for {job_desc}.')
230  )
231  h_module_memory.GetListOfFunctions().Add(ROOT.TNamed(
232  'Check',
233  'The increase in virtual memory usage per call for each module '
234  'should be consistent with zero or the reference.')
235  )
236  if contact:
237  h_module_memory.GetListOfFunctions().Add(ROOT.TNamed(
238  'Contact', contact)
239  )
240  for method in memory_methds:
241  h_module_memory.SetTitle('Module %s Memory' % method_name[method])
242  index = 1
243  for modstat in modules:
244  h_module_memory.SetBinContent(index, modstat.memory_mean(method))
245  h_module_memory.SetBinError(
246  index,
247  modstat.memory_stddev(method) * sqrt_n
248  )
249  h_module_memory.GetXaxis().SetBinLabel(index, modstat.name)
250  index += 1
251  h_module_memory.Write('%s%sMemory' % (prefix, method_name[method]))
252  h_module_memory.GetListOfFunctions().RemoveLast()
253 
254  if plot_file:
255  plot_file.Close()
256  save_dir.cd()
257 
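# ---------------------------------------------------------------------------
# Illustration (not part of the original source): a minimal sketch of how
# statistics_plots() is typically invoked at the end of a validation steering
# file, after basf2 has processed its path. The file name, contact address,
# job description and prefix below are made-up examples.
def _example_statistics_plots_usage():
    """Hypothetical usage sketch only; not called anywhere in this module."""
    statistics_plots(
        file_name='ExampleValidation_statistics.root',
        contact='example.librarian@belle2.org',
        job_desc='a simulation and reconstruction job with 1000 events',
        prefix='ExampleValidation',
    )
# ---------------------------------------------------------------------------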
258 
259 def event_timing_plot(
260  data_file,
261  file_name='',
262  max_time=20.0,
263  burn_in=1,
264  contact='',
265  job_desc='',
266  prefix='',
267 ):
268  """
269  Add a validation histogram of event execution time to the given root file.
270  The current root file will be used if file_name is empty (default).
271  The data file has to contain the profile information created by the Profile
272  module.
273  """
274 
275  if not job_desc:
276  job_desc = os.path.basename(sys.argv[0])
277 
278  # Get histogram with time vs event number
279  save_dir = ROOT.gDirectory
280  data = ROOT.TFile.Open(data_file)
281  tree = data.Get("tree")
282  entries = tree.GetEntries()
283  tree.Draw('Entry$>>hEventTime(%d,-0.5,%d.5)' % (entries, entries - 1),
284  'ProfileInfo.m_timeInSec', 'goff')
285  # load the histogram created by the above Draw command
286  h_event_time = data.Get("hEventTime")
287  h_event_time.SetDirectory(0)
288  data.Close()
289  save_dir.cd()
290 
291  # Open plot file
292  plot_file = None
293  if file_name:
294  plot_file = ROOT.TFile.Open(file_name, 'UPDATE')
295 
296  # Create and fill histogram with event execution time distribution
297  stat = ROOT.gStyle.GetOptStat()
298  ROOT.gStyle.SetOptStat(101110)
299  h_timing = ROOT.TH1D(prefix + 'Timing', 'Event Timing', 100, 0, max_time)
300  h_timing.UseCurrentStyle()
301  h_timing.GetXaxis().SetTitle('time [s]')
302  h_timing.GetYaxis().SetTitle('events')
303  h_timing.GetListOfFunctions().Add(ROOT.TNamed(
304  'Description',
305  f'The distribution of event execution times for {job_desc}.')
306  )
307  h_timing.GetListOfFunctions().Add(ROOT.TNamed(
308  'Check',
309  'The distribution should be consistent with the reference (or '
310  'previous revisions if no reference exists).')
311  )
312  if contact:
313  h_timing.GetListOfFunctions().Add(ROOT.TNamed('Contact', contact))
314  for event in range(1 + burn_in, entries + 1):
315  h_timing.Fill(
316  h_event_time.GetBinContent(event) -
317  h_event_time.GetBinContent(event - 1)
318  )
319  h_timing.Write()
320  ROOT.gStyle.SetOptStat(stat)
321 
322  if plot_file:
323  plot_file.Close()
324  save_dir.cd()
325 
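# ---------------------------------------------------------------------------
# Illustration (not part of the original source): event_timing_plot() expects
# as data_file a ROOT file written by the Profile module (it reads the branch
# ProfileInfo.m_timeInSec from the "tree" object) and appends the timing
# histogram to file_name. All file names below are made-up examples.
def _example_event_timing_plot_usage():
    """Hypothetical usage sketch only; not called anywhere in this module."""
    event_timing_plot(
        data_file='ExampleValidation_profile.root',
        file_name='ExampleValidation_statistics.root',
        max_time=20.0,
        burn_in=1,
        prefix='ExampleValidation',
    )
# ---------------------------------------------------------------------------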
326 
327 def draw_progress_bar(delete_lines: int, scripts: List[Script], barlength=50):
328  """
329  This function plots a progress bar of the validation, i.e. it shows what
330  percentage of the scripts has been executed so far.
331  It also shows which scripts are currently running, as well as
332  the total runtime of the validation.
333 
334  @param delete_lines: The number of lines which need to be deleted before
335  we can redraw the progress bar
336  @param scripts: List of all Script objects
337  @param barlength: The length of the progress bar (in characters)
338  @return: The number of lines that were printed by this function call.
339  Useful if this function is called repeatedly.
340  """
341 
342  # Get statistics: Number of finished scripts + number of scripts in total
343  finished_scripts = len([
344  _ for _ in scripts
345  if _.status in [
346  ScriptStatus.finished,
347  ScriptStatus.failed,
348  ScriptStatus.skipped
349  ]
350  ])
351  all_scripts = len(scripts)
352  percent = 100.0 * finished_scripts / all_scripts
353 
354  # Get the runtime of the script
355  runtime = int(timeit.default_timer() - get_start_time())
356 
357  # Move the cursor up and clear lines
358  for i in range(delete_lines):
359  print("\x1b[2K \x1b[1A", end=' ')
360 
361  # Print the progress bar:
362  progressbar = ""
363  for i in range(barlength):
364  if i < int(barlength * percent / 100.0):
365  progressbar += '='
366  else:
367  progressbar += ' '
368  print(
369  f'\x1b[0G[{progressbar}] {percent:6.1f}% '
370  f'({finished_scripts}/{all_scripts})')
371 
372  # Print the total runtime:
373  print(f'Runtime: {runtime}s')
374 
375  # Print the list of currently running scripts:
376  running = [os.path.basename(__.path) for __ in scripts
377  if __.status == ScriptStatus.running]
378 
379  # If nothing is currently running
380  if not running:
381  running = ['-']
382 
383  print(f'Running: {running[0]}')
384  for __ in running[1:]:
385  print('{0} {1}'.format(len('Running:') * " ", __))
386 
387  return len(running) + 2
388 
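# ---------------------------------------------------------------------------
# Illustration (not part of the original source): the return value of
# draw_progress_bar() is meant to be fed back in as delete_lines on the next
# call, so that the previously printed lines are erased before redrawing.
# This is the same polling pattern used in Validation.run_validation() below.
def _example_progress_bar_loop(scripts):
    """Hypothetical usage sketch only; not called anywhere in this module."""
    lines = 0
    while any(s.status in [ScriptStatus.waiting, ScriptStatus.running]
              for s in scripts):
        time.sleep(1)
        lines = draw_progress_bar(lines, scripts)
# ---------------------------------------------------------------------------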
389 
391  """
392  This can be used to parse the execution intervals of validation scripts
393  and can check whether a script object is in the list of intervals
394  configured in this class.
395  """
396 
397  def __init__(self, intervals):
398  """
399  Initializes the IntervalSelector class with a list of intervals which
400  should be selected
401  """
402 
403  #: stores the intervals which have been selected
404  self.intervals = [x.strip() for x in intervals]
405 
406  def in_interval(self, script_object):
407  """
408  Checks whether the interval listed in a script object's header is
409  within the selected intervals.
410  """
411 
412  # for scripts, which have no interval set, the default is nightly
413  script_interval = "nightly"
414 
415  if script_object.header is not None:
416  if "interval" in script_object.header:
417  script_interval = script_object.header["interval"]
418 
419  return script_interval in self.intervals
420 
421 
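# ---------------------------------------------------------------------------
# Illustration (not part of the original source): a hypothetical use of the
# IntervalSelector defined above. In execute() further down, the interval
# list is taken from the comma-separated cmd_arguments.intervals value; the
# values used here are just examples.
def _example_interval_selector_usage(script):
    """Hypothetical usage sketch only; not called anywhere in this module."""
    selector = IntervalSelector(["nightly", "release"])
    # True if the script header declares one of the selected intervals
    # (scripts without an "interval" entry default to "nightly")
    return selector.in_interval(script)
# ---------------------------------------------------------------------------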
422 
425 
426 
427 # todo: [Ref, low prio, low work] Denote private methods with underscore
428 # /klieret
429 class Validation:
430 
431  """!
432  This is the class that provides all global variables, like 'list_of_files'
433  etc. There is only one instance of this class with name 'validation'. This
434  allows us to use some kind of namespace, i.e. global variables will always be
435  referenced as validation.[name of variable]. This makes it easier to
436  distinguish them from local variables that only exist within the scope of a
437  function or a method.
438 
439  @var tag: The name of the folder within the results directory
440  @var log: Reference to the logging object for this validation instance
441  @var basepaths: The paths to the local and central release directory
442  @var scripts: List of all Script objects for steering files
443  @var packages: List of all packages which contributed scripts
444  @var basf2_options: The options to be given to the basf2 command
445  @var mode: Whether to run locally or on a cluster
446  @var quiet: No progress bar in quiet mode
447  @var dry: Dry runs do not actually start any scripts (for debugging)
448  """
449 
450  def __init__(self, tag='current'):
451  """!
452  The default constructor. Initializes all those variables that will be
453  globally accessible later on. Does not return anything.
454  """
455 
456  # The name which will be used to create a folder in the results
457  # directory. Default is 'current'.
458  self.tag = tag
459 
460  # This dictionary holds the paths to the local and central release dir
461  # (or 'None' if one of them does not exist)
462  self.basepaths = validationpath.get_basepath()
463 
464  # Folder used for the intermediate and final results of the validation
465  self.work_folder = os.path.abspath(os.getcwd())
466 
467  # The logging-object for the validation (Instance of the logging-
468  # module). Initialize the log as 'None' and then call the method
469  # 'create_log()' to create the actual log.
470  self.log = None
471  self.create_log()
472 
473  # The list which holds all steering file objects
474  # (as instances of class Script)
475  self.scripts = []
476 
477  # A list of all packages from which we have collected steering files
478  self.packages = []
479 
480  # This is the list of packages which will be ignored by default. This is
481  # only the validation package itself, because it only creates
482  # test-plots for validation development. To see only the
483  # validation-package output, use the --test command line flag
484  self.ignored_packages = ["validation-test"]
485 
486  # Additional arguments for basf2, if we received any from the command
487  # line arguments
488  self.basf2_options = ''
489 
490  # A variable which holds the mode, i.e. 'local' for local
491  # multi-processing and 'cluster' for cluster usage
492  self.mode = None
493 
494  # Defines whether the validation is run in quiet mode, i.e. without
495  # the dynamic progress bar
496  self.quiet = False
497 
498  # Defines if a dry run is performed, i.e. a run where the steering
499  # files are not actually started (for debugging purposes)
500  self.dry = False
501 
502  # If this is set, dependencies will be ignored.
503  self.ignore_dependencies = False
504 
505  #: reporting time (in minutes)
506  #: the time in minutes after which there will
507  #: be the first log output if a script is still not complete. This
508  #: prints every x minutes which scripts are still running
509  self.running_script_reporting_interval = 30
510 
511 
512  #: The maximum time in minutes a script is allowed to run before it is
513  #: terminated and marked as failed; a value <= 0 disables the limit
514  self.script_max_runtime_in_minutes = 60*5
515 
516 
517  self.parallel = None
518 
519  def get_useable_basepath(self):
520  """
521  Checks if a local path is available. If only a central release is
522  available, return the path to this central release
523  """
524  if self.basepaths["local"]:
525  return self.basepaths["local"]
526  else:
527  return self.basepaths["central"]
528 
529  @staticmethod
530  def get_available_job_control():
531  """
532  Return the possible backend controls; they will be checked via their
533  is_supported method to see if they actually can be executed in the
534  current environment
535  """
536  return [localcontrol.Local,
537  clustercontrol.Cluster,
538  clustercontrolsge.Cluster,
539  clustercontroldrmaa.Cluster]
540 
541  @staticmethod
542  def get_available_job_control_names():
543  return [c.name() for c in Validation.get_available_job_control()]
544 
545  def build_dependencies(self):
546  """!
547  This method loops over all Script objects in self.scripts and
548  calls their compute_dependencies()-method.
549  @return: None
550  """
551  for script_object in self.scripts:
552  script_object.compute_dependencies(self.scripts)
553 
554  # Make sure dependent scripts of skipped scripts are skipped, too.
555  for script_object in self.scripts:
556  if script_object.status == ScriptStatus.skipped:
557  self.skip_script(
558  script_object,
559  reason="Depends on '{}'".format(script_object.path)
560  )
561 
562  def build_headers(self):
563  """!
564  This method loops over all Script objects in self.scripts and
565  calls their load_header()-method.
566  @return: None
567  """
568  for script_object in self.scripts:
569  script_object.load_header()
570 
571  def skip_script(self, script_object, reason=""):
572  """!
573  This method sets the status of the given script and all dependent ones
574  to 'skipped'.
575  @param script_object: Script object to be skipped.
576  @param reason: Reason for skipping object
577  @return: None
578  """
579  # Print a warning if the status of the script is changed and then
580  # set it to 'skipped'.
581  if script_object.status not in [ScriptStatus.skipped,
582  ScriptStatus.failed]:
583  self.log.warning('Skipping ' + script_object.path)
584  if reason:
585  self.log.debug("Reason for skipping: {}.".format(reason))
586  script_object.status = ScriptStatus.skipped
587 
588  # Also skip all dependent scripts.
589  for dependent_script in self.scripts:
590  if script_object in dependent_script.dependencies:
591  self.skip_script(
592  dependent_script,
593  reason="Depends on '{}'".format(script_object.path)
594  )
595 
596  def create_log(self):
597  """!
598  Create the logger.
599  We use the logging module to create an object which allows us to
600  comfortably log everything that happens during the execution of
601  this script and even have different levels of importance, such as
602  'ERROR' or 'DEBUG'.
603  @return: None
604  """
605  # Create the log and set its default level to DEBUG, which means that
606  # it will store _everything_.
607  self.log = logging.getLogger('validate_basf2')
608  self.log.setLevel(logging.DEBUG)
609 
610  # Now we add another custom level 'NOTE'. This is because we don't
611  # want to print ERRORs and WARNINGs to the console output, therefore
612  # we need a higher level.
613  # We define the new level and tell 'self.log' what to do when we use it
614  logging.NOTE = 100
615  logging.addLevelName(logging.NOTE, 'NOTE')
616  self.log.note = lambda msg, *args: self.log._log(logging.NOTE,
617  msg, args)
618 
619  # Set up the console handler. The console handler will redirect a
620  # certain subset of all log message (i.e. those with level 'NOTE') to
621  # the command line (stdout), so we know what's going on when we
622  # execute the validation.
623 
624  # Define the handler and its level (=NOTE)
625  console_handler = logging.StreamHandler()
626  console_handler.setLevel(logging.NOTE)
627 
628  # Format the handler. We only need the message, no date/time etc.
629  console_format = logging.Formatter('%(message)s')
630  console_handler.setFormatter(console_format)
631 
632  # Add the console handler to self.log
633  self.log.addHandler(console_handler)
634 
635  # Now set up the file handler. The file handler will redirect
636  # _everything_ we log to a logfile so that we have all possible
637  # information available for debugging later.
638 
639  # Make sure the folder for the log file exists
640  log_dir = self.get_log_folder()
641  if not os.path.exists(log_dir):
642  print("Creating " + log_dir)
643  os.makedirs(log_dir)
644 
645  # Define the handler and its level (=DEBUG to get everything)
646  file_handler = logging.FileHandler(
647  os.path.join(log_dir, 'validate_basf2.log'),
648  'w+'
649  )
650  file_handler.setLevel(logging.DEBUG)
651 
652  # Format the handler. We want the datetime, the module that produced
653  # the message, the LEVEL of the message and the message itself
654  file_format = logging.Formatter('%(asctime)s - %(module)s - '
655  '%(levelname)s - %(message)s',
656  datefmt='%Y-%m-%d %H:%M:%S')
657  file_handler.setFormatter(file_format)
658 
659  # Add the file handler to self.log
660  self.log.addHandler(file_handler)
661 
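# Illustration (not part of the original source): the custom NOTE level
# created in create_log() above uses the value 100, which is higher than
# logging.CRITICAL (50). The console handler set to this level therefore
# shows only messages emitted via self.log.note(), while the DEBUG-level
# file handler still records everything. The same pattern in isolation:
#
#     log = logging.getLogger('example')
#     log.setLevel(logging.DEBUG)
#     logging.addLevelName(100, 'NOTE')
#     console = logging.StreamHandler()
#     console.setLevel(100)
#     log.addHandler(console)
#     log.log(100, 'shown on the console')
#     log.debug('recorded only by file handlers, if any')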
662  def collect_steering_files(self, interval_selector):
663  """!
664  This function will collect all steering files from the local and
665  central release directory.
666  @return: None
667  """
668 
669  # Get all folders that contain steering files, first the local ones
670  validation_folders = get_validation_folders(
671  'local', self.basepaths, self.log)
672 
673  # Then add those central folders that do not have a local match
674  for (package, folder) in get_validation_folders(
675  'central', self.basepaths, self.log).items():
676  if package not in validation_folders:
677  validation_folders[package] = folder
678 
679  # remove packages which have been explicitly ignored
680  for ignored in self.ignored_packages:
681  if ignored in validation_folders:
682  del validation_folders[ignored]
683 
684  # Now write to self.packages which packages we have collected
685  self.packages = list(validation_folders.keys())
686 
687  # Finally, we collect the steering files from each folder we have
688  # collected:
689  for (package, folder) in validation_folders.items():
690 
691  # Collect only *.C and *.py files
692  c_files = scripts_in_dir(folder, self.log, '.C')
693  py_files = scripts_in_dir(folder, self.log, '.py')
694  for steering_file in c_files + py_files:
695  script = Script(steering_file, package, self.log)
696 
697  script.load_header()
698  # only select this script, if this interval has been selected
699  if interval_selector.in_interval(script):
700  self.scripts.append(script)
701 
702  # That's it, now there is a complete list of all steering files on
703  # which we are going to perform the validation in self.scripts
704 
705  def get_log_folder(self):
706  """!
707  Get the log folder for this validation run. The log files of the
708  individual (successful, failed) scripts will be recorded there
709  """
710  return validationpath.get_results_tag_folder(self.work_folder,
711  self.tag)
712 
713  def log_failed(self):
714  """!
715  This method logs all scripts with property failed into a single file
716  to be read in run_validation_server.py
717  """
718 
719  failed_log_path = os.path.join(
720  self.get_log_folder(),
721  "list_of_failed_scripts.log"
722  )
723  self.log.note(f"Writing list of failed scripts to {failed_log_path}.")
724 
725  # Select only failed scripts
726  failed_scripts = [
727  script for script in self.scripts
728  if script.status == ScriptStatus.failed
729  ]
730 
731  with open(failed_log_path, "w+") as list_failed:
732  # log the name of all failed scripts
733  for script in failed_scripts:
734  list_failed.write(script.path.split("/")[-1] + "\n")
735 
736  def log_skipped(self):
737  """!
738  This method logs all scripts with property skipped into a single file
739  to be read in run_validation_server.py
740  """
741 
742  skipped_log_path = os.path.join(
743  self.get_log_folder(),
744  "list_of_skipped_scripts.log"
745  )
746  self.log.note(
747  f"Writing list of skipped scripts to {skipped_log_path}."
748  )
749 
750  # Select only skipped scripts
751  skipped_scripts = [
752  script for script in self.scripts
753  if script.status == ScriptStatus.skipped
754  ]
755 
756  with open(skipped_log_path, "w+") as list_skipped:
757  # log the name of all skipped scripts
758  for script in skipped_scripts:
759  list_skipped.write(script.path.split("/")[-1] + "\n")
760 
761  def report_on_scripts(self):
762  """!
763  Print a summary about all scripts, especially highlighting
764  skipped and failed scripts.
765  """
766 
767  failed_scripts = [
768  script.package + "/" + script.name for script in self.scripts
769  if script.status == ScriptStatus.failed
770  ]
771  skipped_scripts = [
772  script.package + "/" + script.name for script in self.scripts
773  if script.status == ScriptStatus.skipped
774  ]
775 
776  self.log.note("")
777  self.log.note(terminal_title_line(
778  "Summary of script execution", level=0
779  ))
780  self.log.note(f"Total number of scripts: {len(self.scripts)}")
781  self.log.note("")
782  if skipped_scripts:
783  self.log.note("{}/{} scripts were skipped".format(
784  len(skipped_scripts), len(self.scripts)))
785  for s in skipped_scripts:
786  self.log.note("* {}".format(s))
787  self.log.note("")
788  else:
789  self.log.note("No scripts were skipped. Nice!")
790  self.log.note("")
791 
792  if failed_scripts:
793  self.log.note("{}/{} scripts failed".format(
794  len(failed_scripts), len(self.scripts)))
795  for s in failed_scripts:
796  self.log.note(f"* {s}")
797  self.log.note("")
798  else:
799  self.log.note("No scripts failed. Nice!")
800  self.log.note("")
801 
802  self.log.note(validationfunctions.congratulator(
803  total=len(self.scripts),
804  failure=len(failed_scripts) + len(skipped_scripts)
805  ))
806 
807  def set_runtime_data(self):
808  """!
809  This method sets the runtime property of each script.
810  """
811 
812  run_times = {}
813  path = validationpath.get_results_runtime_file(self.work_folder)
814  with open(path, "r") as runtimes:
815 
816  # Get our data
817  for line in runtimes:
818  run_times[line.split("=")[0].strip()] = \
819  line.split("=")[1].strip()
820 
821  # And try to set a property for each script
822  for script in self.scripts:
823  try:
824  script.runtime = float(run_times[script.name])
825  # If we don't have runtime data, then set it to an average of
826  # all runtimes
827  except KeyError:
828  suma = 0.0
829  for dict_key in run_times:
830  suma += float(run_times[dict_key])
831  script.runtime = suma / len(run_times)
832 
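 # Illustration (not part of the original source): set_runtime_data() above
 # expects the runtimes file to contain one "name=seconds" entry per line,
 # e.g. (hypothetical script names and values):
 #
 #     SomeValidationScript.py=4.7
 #     AnotherValidationScript.py=1893.2
 #
 # Scripts that do not appear in the file fall back to the average of all
 # recorded runtimes.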
833  def get_script_by_name(self, name):
834  """!
835  Returns the script object with the given name, or None if there is no unique match.
836  """
837 
838  l = [s for s in self.scripts if s.name == name]
839  if len(l) == 1:
840  return l[0]
841  else:
842  return None
843 
844  def apply_package_selection(self, selected_packages,
845  ignore_dependencies=False):
846  """!
847  Only select packages from a specific set of packages, but still
848  honor the dependencies to outside scripts which may exist
849  """
850 
851  to_keep_dependencies = set()
852 
853  # compile the dependencies of selected scripts
854  # todo: won't work for nested dependencies
855  if not ignore_dependencies:
856  for script_obj in self.scripts:
857  if script_obj.package in selected_packages:
858  for dep in script_obj.dependencies:
859  to_keep_dependencies.add(dep.unique_name())
860  # now, remove all scripts from the script list, which are either
861  # not in the selected packages or have a dependency to them
862  self.scripts = [
863  s for s in self.scripts if (
864  s.package in selected_packages) or (
865  s.unique_name() in to_keep_dependencies)]
866 
867  # Check if some of the selected_packages were not found.
868  packages = set(s.package for s in self.scripts)
869  packages_not_found = list(set(selected_packages) - packages)
870  if packages_not_found:
871  msg = "You asked to select the package(s) {}, but they were not " \
872  "found.".format(', '.join(packages_not_found))
873  self.log.note(msg)
874  self.log.warning(msg)
875 
876  def apply_script_selection(self, script_selection,
877  ignore_dependencies=False):
878  """!
879  This method will take the validation file name (e.g.
880  "FullTrackingValidation.py"), determine all the scripts it depends on
881  and set the status of these scripts to "waiting". The status of all
882  other scripts will be set to "skipped", which means they will not be
883  executed in the validation run. If ignore_dependencies is True,
884  dependencies will also be set to "skipped".
885  """
886 
887  # change file extension
888  script_selection = [
889  Script.sanitize_file_name(s) for s in script_selection
890  ]
891 
892  scripts_to_enable = set()
893 
894  # find the dependencies of each selected script
895  for script in script_selection:
896  scripts_to_enable.add(script)
897  script_obj = self.get_script_by_name(script)
898 
899  if script_obj is None:
900  self.log.error(
901  f"Script with name {script} cannot be found, skipping for "
902  f"selection"
903  )
904  continue
905 
906  others = script_obj.get_recursive_dependencies(self.scripts)
907  if not ignore_dependencies:
908  scripts_to_enable = scripts_to_enable.union(others)
909 
910  # enable all selections and dependencies
911  for script_obj in self.scripts:
912  if script_obj.name in scripts_to_enable:
913  self.log.warning(
914  f"Enabling script {script_obj.name} because it was "
915  f"selected or a selected script depends on it."
916  )
917  script_obj.status = ScriptStatus.waiting
918  else:
919  self.log.warning(
920  f"Disabling script {script_obj.name} because it was "
921  f"not selected."
922  )
923  script_obj.status = ScriptStatus.skipped
924 
925  # Check if some of the selected_packages were not found.
926  script_names = set(
927  Script.sanitize_file_name(s.name) for s in self.scripts
928  )
929  scripts_not_found = set(script_selection) - script_names
930  if scripts_not_found:
931  msg = "You requested script(s) {}, but they seem to not have " \
932  "been found.".format(", ".join(scripts_not_found))
933  self.log.note(msg)
934  self.log.warning(msg)
935 
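 # Illustration (not part of the original source): a hypothetical call that
 # restricts a validation run to a single steering file and its dependencies,
 #
 #     validation.apply_script_selection(["FullTrackingValidation.py"],
 #                                       ignore_dependencies=False)
 #
 # keeps that script and everything it recursively depends on in status
 # "waiting" and sets all other scripts to "skipped".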
936  def apply_script_caching(self):
937  cacheable_scripts = [s for s in self.scripts if s.is_cacheable()]
938 
939  output_dir_datafiles = validationpath.get_results_tag_folder(
940  self.work_folder, self.tag)
941 
942  for s in cacheable_scripts:
943  # check whether all output files of this script already exist
944  outfiles = s.get_output_files()
945  files_exist = True
946  for of in outfiles:
947  full_path = os.path.join(output_dir_datafiles, of)
948  files_exist = files_exist and os.path.isfile(full_path)
949 
950  if files_exist:
951  s.status = ScriptStatus.cached
952 
953  # Remove all cached scripts from the dependencies
954  # of dependent script objects, they will not be
955  # executed and no one needs to wait for them
956  for script in self.scripts:
957  for dep_script in script.dependencies:
958  # check if the script this one is depending on is
959  # in cached execution
960  if dep_script.status == ScriptStatus.cached:
961  script.dependencies.remove(dep_script)
962 
963  def store_run_results_json(self, git_hash):
964 
965  # write a summary of this validation run (including the git hash which
966  # was used for executing the validation scripts) to a json file
967  json_package = []
968  for p in self.packages:
969  this_package_scripts = [
970  s for s in self.scripts if s.package == p
971  ]
972  json_scripts = [s.to_json(self.tag) for s in this_package_scripts]
973 
974  # count the failed scripts
975  fail_count = sum([
976  s.status == ScriptStatus.failed for s in this_package_scripts
977  ])
978  json_package.append(json_objects.Package(
979  p,
980  scriptfiles=json_scripts,
981  fail_count=fail_count)
982  )
983 
984  # todo: assign correct color here
985  rev = json_objects.Revision(
986  label=self.tag,
987  creation_date=datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
988  creation_timezone=validationfunctions.get_timezone(),
989  packages=json_package,
990  git_hash=git_hash
991  )
992  json_objects.dump(
993  validationpath.get_results_tag_revision_file(
994  self.work_folder, self.tag
995  ),
996  rev
997  )
998 
999  def add_script(self, script):
1000  """!
1001  Explicitly add a script object. In normal operation, scripts are
1002  auto-discovered but this method is useful for testing
1003  """
1004 
1005  self.scripts.append(script)
1006 
1007  @staticmethod
1008  def sort_scripts(script_list):
1009  """
1010  Sort the list of scripts that have to be processed by runtime,
1011  execute slow scripts first. If no runtime information is available
1012  from the last execution, run the scripts in the validation package
1013  first because they are long running and used as input for other scripts
1014  """
1015  script_list.sort(
1016  key=lambda x: x.runtime or x.package == "validation",
1017  reverse=True
1018  )
1019 
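 # Illustration (not part of the original source): with the sort key above, a
 # script with a recorded runtime uses that float as its key, while a script
 # without one uses True (1) if it belongs to the "validation" package and
 # False (0) otherwise. Sorting in reverse order therefore starts long jobs
 # first and, among scripts with no runtime information, prefers the
 # validation package, e.g. key order 1893.2 > 4.7 > True > False.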
1020  # todo: if you have to indent by 9 tabs, you know that it's time to refactor /klieret
1021  def run_validation(self):
1022  """!
1023  This method runs the actual validation, i.e. it loops over all
1024  scripts, checks which of them are ready for execution, and runs them.
1025  @return: None
1026  """
1027 
1028  # Use the local execution for all plotting scripts
1029  self.log.note("Initializing local job control for plotting.")
1030  local_control = localcontrol.\
1031  Local(max_number_of_processes=self.parallel)
1032 
1033  # Depending on the selected mode, load either the controls for the
1034  # cluster or for local multi-processing
1035 
1036  self.log.note("Selecting job control for all other jobs.")
1037 
1038  selected_controls = [
1039  c for c in self.get_available_job_control()
1040  if c.name() == self.mode
1041  ]
1042 
1043  if not len(selected_controls) == 1:
1044  print(f"Selected mode {self.mode} does not exist")
1045  sys.exit(1)
1046 
1047  selected_control = selected_controls[0]
1048 
1049  self.log.note("Controller: {} ({})".format(
1050  selected_control.name(),
1051  selected_control.description()
1052  ))
1053 
1054  if not selected_control.is_supported():
1055  print(f"Selected mode {self.mode} is not supported on your system")
1056  sys.exit(1)
1057 
1058  # instantiate the selected job control backend
1059  if selected_control.name() == "local":
1060  control = selected_control(max_number_of_processes=self.parallel)
1061  else:
1062  control = selected_control()
1063 
1064  # read the git hash which is used to produce this validation
1065  src_basepath = self.get_useable_basepath()
1066  git_hash = validationfunctions.get_compact_git_hash(src_basepath)
1067  self.log.debug(
1068  f"Git hash of repository located at {src_basepath} is {git_hash}"
1069  )
1070 
1071  # todo: perhaps we want to have these files in the results folder, don't we? /klieret
1072  # If we do have runtime data, then read them
1073  if os.path.exists("./runtimes.dat") and \
1074  os.stat("./runtimes.dat").st_size:
1075  self.set_runtime_data()
1076  if os.path.exists("./runtimes-old.dat"):
1077  # If there is an old data backup, delete it, we backup only
1078  # one run
1079  os.remove("./runtimes-old.dat")
1080  if self.mode == "local":
1081  # Backup the old data file
1082  shutil.copyfile("./runtimes.dat", "./runtimes-old.dat")
1083 
1084  # Open runtimes log and start logging, but log only if we are
1085  # running in the local mode
1086  if self.mode == "local":
1087  runtimes = open('./runtimes.dat', 'w+')
1088 
1089  if not self.quiet:
1090  # This variable is needed for the progress bar function
1091  progress_bar_lines = 0
1092  print()
1093 
1094  # The list of scripts that have to be processed
1095  remaining_scripts = [script for script in self.scripts
1096  if script.status == ScriptStatus.waiting]
1097 
1098  # Sort the list of scripts that have to be processed by runtime,
1099  # execute slow scripts first
1100  self.sort_scripts(remaining_scripts)
1101 
1102  def handle_finished_script(script_obj: Script):
1103  # Write to log that the script finished
1104  self.log.debug('Finished: ' + script_obj.path)
1105 
1106  # If we are running locally, log a runtime
1107  script_obj.runtime = time.time() - script_obj.start_time
1108  if self.mode == "local":
1109  runtimes.write(script_obj.name + "=" +
1110  str(script_obj.runtime) + "\n")
1111 
1112  # Check for the return code and set variables accordingly
1113  script_obj.status = ScriptStatus.finished
1114  script_obj.returncode = result[1]
1115  if result[1] != 0:
1116  script_obj.status = ScriptStatus.failed
1117  self.log.warning(
1118  f'exit_status was {result[1]} for {script_obj.path}'
1119  )
1120 
1121  # Skip all dependent scripts
1122  self.skip_script(
1123  script_obj,
1124  reason="Script '{}' failed and we set it's status to "
1125  "skipped so that all dependencies are "
1126  "also skipped.".format(script_object.path)
1127  )
1128 
1129  else:
1130  # Remove this script from the dependencies of dependent
1131  # script objects
1132  for dependent_script in remaining_scripts:
1133  if script_obj in dependent_script.dependencies:
1134  dependent_script.dependencies.remove(script_obj)
1135 
1136  # Some printout in quiet mode
1137  if self.quiet:
1138  waiting = [script for script in remaining_scripts
1139  if script.status == ScriptStatus.waiting]
1140  running = [script for script in remaining_scripts
1141  if script.status == ScriptStatus.running]
1142  print(
1143  'Finished [{0},{1}]: {2} -> {3}'.format(
1144  len(waiting),
1145  len(running),
1146  script_obj.path,
1147  script_obj.status
1148  )
1149  )
1150 
1151  def handle_unfinished_script(script_obj: Script):
1152  if (time.time() - script_obj.last_report_time) / 60.0 > \
1153  self.running_script_reporting_interval:
1154  print(
1155  "Script {} running since {} seconds".format(
1156  script_obj.name_not_sanitized,
1157  time.time() - script_obj.start_time))
1158  # explicit flush so this will show up in log file right away
1159  sys.stdout.flush()
1160 
1161  # not finished yet, log time
1162  script_obj.last_report_time = time.time()
1163 
1164  # check for the maximum time a script is allow to run and
1165  # terminate if exceeded
1166  total_runtime_in_minutes = \
1167  (time.time() - script_obj.start_time) / 60.0
1168  if total_runtime_in_minutes > self.script_max_runtime_in_minutes > 0:
1169  script_obj.status = ScriptStatus.failed
1170  self.log.warning(
1171  f'Script {script_obj.path} did not finish after '
1172  f'{total_runtime_in_minutes} minutes, attempting to '
1173  f'terminate. '
1174  )
1175  # kill the running process
1176  script_obj.control.terminate(script_obj)
1177  # Skip all dependent scripts
1178  self.skip_script(
1179  script_obj,
1180  reason=f"Script '{script_object.path}' did not finish in "
1181  f"time, so we're setting it to 'failed' so that all "
1182  f"dependent scripts will be skipped."
1183  )
1184 
1185  def handle_waiting_script(script_obj: Script):
1186  # Determine the way of execution depending on whether
1187  # data files are created
1188  if script_obj.header and \
1189  script_obj.header.get('output', []):
1190  script_obj.control = control
1191  else:
1192  script_obj.control = local_control
1193 
1194  # Do not spawn processes if there are already too many!
1195  if script_obj.control.available():
1196 
1197  # Write to log which script is being started
1198  self.log.debug('Starting ' + script_obj.path)
1199 
1200  # Set script object variables accordingly
1201  if script_obj.status == ScriptStatus.failed:
1202  self.log.warning(
1203  f'Starting of {script_obj.path} failed'
1204  )
1205  else:
1206  script_obj.status = ScriptStatus.running
1207 
1208  # Actually start the script execution
1209  script_obj.control.execute(
1210  script_obj,
1211  self.basf2_options,
1212  self.dry,
1213  self.tag
1214  )
1215 
1216  # Log the script execution start time
1217  script_obj.start_time = time.time()
1218  script_obj.last_report_time = time.time()
1219 
1220  # Some printout in quiet mode
1221  if self.quiet:
1222  waiting = [_ for _ in remaining_scripts
1223  if _.status == ScriptStatus.waiting]
1224  running = [_ for _ in remaining_scripts
1225  if _.status == ScriptStatus.running]
1226  print('Started [{0},{1}]: {2}'.format(
1227  len(waiting), len(running),
1228  script_obj.path)
1229  )
1230 
1231  # While there are scripts that have not yet been executed...
1232  while remaining_scripts:
1233 
1234  # Loop over all steering files / Script objects
1235  for script_object in remaining_scripts:
1236 
1237  # If the script is currently running
1238  if script_object.status == ScriptStatus.running:
1239 
1240  # Check if the script has finished:
1241  result = script_object.control.\
1242  is_job_finished(script_object)
1243 
1244  # If it has finished:
1245  if result[0]:
1246  handle_finished_script(script_object)
1247  else:
1248  handle_unfinished_script(script_object)
1249 
1250  # Otherwise (the script is waiting) and if it is ready to be
1251  # executed
1252  elif not script_object.dependencies:
1253  handle_waiting_script(script_object)
1254 
1255  # Update the list of scripts that have to be processed
1256  remaining_scripts = [
1257  script for script in remaining_scripts
1258  if script.status in [ScriptStatus.waiting,
1259  ScriptStatus.running]
1260  ]
1261 
1262  # Sort them again, Justin Case
1263  self.sort_scripts(remaining_scripts)
1264 
1265  # Wait for one second before starting again
1266  time.sleep(1)
1267 
1268  # If we are not running in quiet mode, draw the progress bar
1269  if not self.quiet:
1270  progress_bar_lines = draw_progress_bar(
1271  progress_bar_lines,
1272  self.scripts
1273  )
1274 
1275  # Log failed and skipped scripts
1276  self.log_failed()
1277  self.log_skipped()
1278 
1279  # And close the runtime data file
1280  if self.mode == "local":
1281  runtimes.close()
1282  print()
1283 
1284  self.store_run_results_json(git_hash)
1285  # todo: update list of available revisions with the current run
1286 
1287  def create_plots(self):
1288  """!
1289  This method prepares the html directory for the plots if necessary
1290  and creates the plots that include the results from this validation.
1291  @return: None
1292  """
1293 
1294  html_folder = validationpath.get_html_folder(self.work_folder)
1295  results_folder = validationpath.get_results_folder(
1296  self.work_folder
1297  )
1298 
1299  os.makedirs(html_folder, exist_ok=True)
1300 
1301  if not os.path.exists(results_folder):
1302  self.log.error(
1303  f"Folder {results_folder} not found in "
1304  f"the work directory {self.work_folder}, please run "
1305  f"b2validation first"
1306  )
1307 
1308  validationplots.create_plots(force=True, work_folder=self.work_folder)
1309 
1310 
1311 def execute(tag=None, is_test=None):
1312  """!
1313  Parses the command line and executes the full validation suite
1314  :param tag: The name that will be used for the current revision.
1315  Default None means automatic.
1316  :param is_test: Run in test mode? Default None means that we read this
1317  from the command line arguments (which default to False).
1318  :returns: None
1319  """
1320 
1321  # Note: Do not test tag and is_test, but rather cmd_arguments.tag
1322  # and cmd_arguments.is_test!
1323  # Also note that we modify some cmd_arguments below
1324  # (e.g. cmd_arguments.packages is updated if cmd_arguments.test is
1325  # specified).
1326 
1327  # If there is no release of basf2 set up, we can stop the execution
1328  # right here!
1329  if os.environ.get('BELLE2_RELEASE_DIR', None) is None and os.environ.get('BELLE2_LOCAL_DIR', None) is None:
1330  sys.exit('Error: No basf2 release set up!')
1331 
1332  # Otherwise we can start the execution. The main part is wrapped in a
1333  # try/except construct to catch keyboard interrupts
1334  # fixme: except instructions make only sense after Validation obj is
1335  # initialized ==> Pull everything until there out of try statement
1336  try:
1337 
1338  # Now we process the command line arguments.
1339  # First of all, we read them in:
1340  cmd_arguments = parse_cmd_line_arguments(
1341  modes=Validation.get_available_job_control_names()
1342  )
1343 
1344  # overwrite the default settings with the parameters given in the method
1345  # call
1346  if tag is not None:
1347  cmd_arguments.tag = tag
1348  if is_test is not None:
1349  cmd_arguments.test = is_test
1350 
1351  # Create the validation object.
1352  validation = Validation(cmd_arguments.tag)
1353 
1354  # Write to log that we have started the validation process
1355  validation.log.note('Starting validation...')
1356  validation.log.note(
1357  f'Results will be stored in a folder named "{validation.tag}"...')
1358  validation.log.note('The (full) log file(s) can be found at {}'.format(
1359  ', '.join(get_log_file_paths(validation.log))
1360  ))
1361  validation.log.note("Please check these logs when encountering "
1362  "unexpected results, as most of the warnings and "
1363  "errors are not written to stdout/stderr.")
1364 
1365  # Check if we received additional arguments for basf2
1366  if cmd_arguments.options:
1367  validation.basf2_options = ' '.join(cmd_arguments.options)
1368  validation.log.note(
1369  f'Received arguments for basf2: {validation.basf2_options}'
1370  )
1371 
1372  # Check if we are using the cluster or local multiprocessing:
1373  validation.mode = cmd_arguments.mode
1374 
1375  # Set if we have a limit on the maximum number of local processes
1376  validation.parallel = cmd_arguments.parallel
1377 
1378  # Check if we are running in quiet mode (no progress bar)
1379  if cmd_arguments.quiet:
1380  validation.log.note("Running in quiet mode (no progress bar).")
1381  validation.quiet = True
1382 
1383  # Check if we are performing a dry run (don't actually start scripts)
1384  if cmd_arguments.dry:
1385  validation.log.note("Performing a dry run; no scripts will be "
1386  "started.")
1387  validation.dry = True
1388 
1389  # If running in test mode, only execute scripts in the validation package
1390  if cmd_arguments.test:
1391  validation.log.note('Running in test mode')
1392  validation.ignored_packages = []
1393  cmd_arguments.packages = ["validation-test"]
1394 
1395  validation.log.note(
1396  "Release Folder: {}".format(validation.basepaths["central"])
1397  )
1398  validation.log.note(
1399  "Local Folder: {}".format(validation.basepaths["local"])
1400  )
1401 
1402  # Now collect the steering files which will be used in this validation.
1403  validation.log.note('Collecting steering files...')
1404  intervals = cmd_arguments.intervals.split(",")
1405  validation.collect_steering_files(IntervalSelector(intervals))
1406 
1407  # Build headers for every script object we have created
1408  validation.log.note('Building headers for Script objects...')
1409  validation.build_headers()
1410 
1411  # Build dependencies for every script object we have created,
1412  # unless we're asked to ignore them.
1413  if not cmd_arguments.select_ignore_dependencies:
1414  validation.log.note('Building dependencies for Script objects...')
1415  validation.build_dependencies()
1416 
1417  if cmd_arguments.packages:
1418  validation.log.note(
1419  "Applying package selection for the following package(s): " +
1420  ", ".join(cmd_arguments.packages)
1421  )
1422  validation.apply_package_selection(cmd_arguments.packages)
1423 
1424  # select only specific scripts, if this option has been set
1425  if cmd_arguments.select:
1426  validation.log.note("Applying selection for validation scripts")
1427  validation.apply_script_selection(cmd_arguments.select,
1428  ignore_dependencies=False)
1429 
1430  # select only specific scripts and ignore their dependencies if
1431  # option is set
1432  if cmd_arguments.select_ignore_dependencies:
1433  validation.log.note("Applying selection for validation scripts, "
1434  "ignoring their dependencies")
1435  validation.apply_script_selection(
1436  cmd_arguments.select_ignore_dependencies,
1437  ignore_dependencies=True
1438  )
1439 
1440  # check if the scripts which are cacheable can be skipped, because
1441  # their output is already available
1442  if cmd_arguments.use_cache:
1443  validation.log.note("Checking for cached script output")
1444  validation.apply_script_caching()
1445 
1446  # Allow to change the maximal run time of the scripts
1447  if cmd_arguments.max_run_time is not None:
1448  if cmd_arguments.max_run_time > 0:
1449  validation.log.note(
1450  f"Setting maximal run time of the steering files "
1451  f"to {cmd_arguments.max_run_time} minutes."
1452  )
1453  else:
1454  validation.log.note(
1455  "Disabling run time limitation of steering files as "
1456  "requested (max run time set to <= 0)."
1457  )
1458  validation.script_max_runtime_in_minutes = \
1459  cmd_arguments.max_run_time
1460 
1461  # Start the actual validation
1462  validation.log.note('Starting the validation...')
1463  validation.run_validation()
1464 
1465  # Log that the validation has finished and that we are creating plots
1466  validation.log.note('Validation finished...')
1467  if not validation.dry:
1468  validation.log.note('Start creating plots...')
1469  validation.create_plots()
1470  validation.log.note('Plots have been created...')
1471  # send mails
1472  if cmd_arguments.send_mails:
1473  mails = mail_log.Mails(validation)
1474  validation.log.note('Start sending mails...')
1475  # send mails to all users with failed scripts/comparison
1476  if cmd_arguments.send_mails_mode == "incremental":
1477  incremental = True
1478  elif cmd_arguments.send_mails_mode == "full":
1479  incremental = False
1480  else:
1481  incremental = None
1482  mails.send_all_mails(
1483  incremental=incremental
1484  )
1485  validation.log.note(
1486  'Save mail data to {}'.format(
1487  validation.get_log_folder()
1488  )
1489  )
1490  # save json with data about outgoing mails
1491  mails.write_log()
1492  else:
1493  validation.log.note('Skipping plot creation and mailing '
1494  '(dry run)...')
1495 
1496  validation.report_on_scripts()
1497 
1498  # Log that everything is finished
1499  validation.log.note(
1500  'Validation finished! Total runtime: {0}s'.format(
1501  int(timeit.default_timer() - get_start_time())
1502  )
1503  )
1504 
1505  if cmd_arguments.view:
1506  # run local webserver
1507  validationserver.run_server(open_site=True)
1508 
1509  except KeyboardInterrupt:
1510  validation.log.note('Validation terminated by user!')

Cross-references for symbols used in this file (generated documentation):

validationfunctions.congratulator (validationfunctions.py:470):
    str congratulator(Optional[Union[int, float]] success=None, Optional[Union[int, float]] failure=None, Optional[Union[int, float]] total=None, just_comment=False, rate_name="Success rate")
validationscript.ScriptStatus.cached (validationscript.py:52)
validationscript.ScriptStatus.failed (validationscript.py:44)
validationpath.get_results_tag_revision_file (validationpath.py:136):
    def get_results_tag_revision_file(output_base_dir, tag)
    Return the absolute path to the revision.json file for one tag folder.
clustercontrol.Cluster (clustercontrol.py:18)
validationfunctions.get_compact_git_hash (validationfunctions.py:44):
    Optional[str] get_compact_git_hash(str repo_folder)
validationserver.run_server (validationserver.py:468):
    def run_server(ip='127.0.0.1', port=8000, parse_command_line=False, open_site=False, dry_run=False)
json_objects.Package (json_objects.py:197)
validation.IntervalSelector.intervals (validation.py:404):
    stores the intervals which have been selected
validationscript.ScriptStatus.waiting (validationscript.py:32)
validation.IntervalSelector.in_interval (validation.py:406):
    def in_interval(self, script_object)
validationscript.ScriptStatus.finished (validationscript.py:40)
validationpath.get_results_runtime_file (validationpath.py:61):
    def get_results_runtime_file(output_base_dir)
    Return the absolute path to the runtimes.dat file. As there is only the runtimes file of the last iter...
validationscript.ScriptStatus.skipped (validationscript.py:48)
validationpath.get_html_folder (validationpath.py:77):
    def get_html_folder(output_base_dir)
    Return the absolute path to the results folder.
validationpath.get_results_folder (validationpath.py:70):
    def get_results_folder(output_base_dir)
    Return the absolute path to the results folder.
clustercontrolsge.Cluster (clustercontrolsge.py:16)
validationscript.ScriptStatus.running (validationscript.py:36)
json_objects.Revision (json_objects.py:28)
validation.IntervalSelector.__init__ (validation.py:397):
    def __init__(self, intervals)
validationplots.create_plots (validationplots.py:798):
    def create_plots(revisions=None, force=False, Optional[Queue] process_queue=None, work_folder=".")
clustercontroldrmaa.Cluster (clustercontroldrmaa.py:11)
validationpath.get_results_tag_folder (validationpath.py:118):
    def get_results_tag_folder(output_base_dir, tag)
    Return the absolute path to the results folder for one specific tag.
json_objects.dump (json_objects.py:554):
    def dump(file_name, obj)
validationfunctions.get_timezone (validationfunctions.py:30):
    str get_timezone()
validation.IntervalSelector (validation.py:390)
validationpath.get_basepath (validationpath.py:21)