Belle II Software  release-08-01-10
validation.py
1 #!/usr/bin/env python3
2 
3 
10 
11 # basf2 specific imports
12 from basf2 import statistics
13 from ROOT import PyConfig
14 
15 PyConfig.IgnoreCommandLineOptions = True # noqa
16 import ROOT
17 
18 # Normal library imports
19 import math
20 import logging
21 import os
22 import timeit
23 import sys
24 import time
25 import shutil
26 import datetime
27 from typing import List, Optional
28 
29 import json_objects
30 import mail_log
31 
32 # A pretty printer. Prints prettier lists, dicts, etc. :)
33 import pprint
34 
35 from validationscript import Script, ScriptStatus
36 from validationfunctions import (
37  get_start_time,
38  get_validation_folders,
39  scripts_in_dir,
40  parse_cmd_line_arguments,
41  get_log_file_paths,
42  terminal_title_line,
43 )
44 import validationfunctions
45 
46 import validationserver
47 import validationplots
48 import validationscript
49 import validationpath
50 
51 # local and cluster control backends
52 import localcontrol
53 import clustercontrol
54 import clustercontrolsge
55 import clustercontroldrmaa
56 
57 
58 pp = pprint.PrettyPrinter(depth=6, indent=1, width=80)
59 
60 
61 def statistics_plots(
62  file_name="",
63  timing_methods=None,
64  memory_methds=None,
65  contact="",
66  job_desc="",
67  prefix="",
68 ):
69  """
70  Add memory usage and execution time validation plots to the given root
71  file. The current root file will be used if file_name is empty (
72  default).
73  """
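# For illustration only, a hedged usage sketch: this is typically called at
# the end of a validation steering file, after event processing (the file
# name, contact and description below are hypothetical):
#   statistics_plots(
#       "ExampleValidation.root",
#       contact="someone@example.org",
#       job_desc="the example validation job",
#   )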
74 
75  if not timing_methods:
76  timing_methods = [statistics.INIT, statistics.EVENT]
77  if not memory_methds:
78  memory_methds = [statistics.INIT, statistics.EVENT]
79 
80  # Open plot file
81  save_dir = ROOT.gDirectory
82  plot_file = None
83  if file_name:
84  plot_file = ROOT.TFile.Open(file_name, "UPDATE")
85 
86  if not job_desc:
87  job_desc = sys.argv[1]
88 
89  # Global timing
90  method_name = {}
91  h_global_timing = ROOT.TH1D(
92  prefix + "GlobalTiming", "Global Timing", 5, 0, 5
93  )
94  h_global_timing.SetStats(0)
95  h_global_timing.GetXaxis().SetTitle("method")
96  h_global_timing.GetYaxis().SetTitle("time/call [ms]")
97  h_global_timing.GetListOfFunctions().Add(
98  ROOT.TNamed(
99  "Description",
100  """The (average) time of the different basf2 execution phases
101  for {}. The error bars show the rms of the time
102  distributions.""".format(
103  job_desc
104  ),
105  )
106  )
107  h_global_timing.GetListOfFunctions().Add(
108  ROOT.TNamed(
109  "Check",
110  """There should be no significant and persistent increases in
111  the run time of the methods. Only cases where the increase
112  compared to the reference or previous versions persists for at
113  least two consecutive revisions should be reported since the
114  measurements can be influenced by load from other processes on
115  the execution host.""",
116  )
117  )
118  if contact:
119  h_global_timing.GetListOfFunctions().Add(
120  ROOT.TNamed("Contact", contact)
121  )
122  for (index, method) in statistics.StatisticCounters.values.items():
123  method_name[method] = str(method)[0] + str(method).lower()[1:].replace(
124  "_r", "R"
125  )
126  if index == 5:
127  break
128  h_global_timing.SetBinContent(
129  index + 1, statistics.get_global().time_mean(method) * 1e-6
130  )
131  h_global_timing.SetBinError(
132  index + 1, statistics.get_global().time_stddev(method) * 1e-6
133  )
134  h_global_timing.GetXaxis().SetBinLabel(index + 1, method_name[method])
135  h_global_timing.Write()
136 
137  # Timing per module for the different methods
138  modules = statistics.modules
139  h_module_timing = ROOT.TH1D(
140  prefix + "ModuleTiming", "Module Timing", len(modules), 0, len(modules)
141  )
142  h_module_timing.SetStats(0)
143  h_module_timing.GetXaxis().SetTitle("module")
144  h_module_timing.GetYaxis().SetTitle("time/call [ms]")
145  h_module_timing.GetListOfFunctions().Add(
146  ROOT.TNamed(
147  "Check",
148  """There should be no significant and persistent increases in
149  the run time of a module. Only cases where the increase compared
150  to the reference or previous versions persists for at least two
151  consecutive revisions should be reported since the measurements
152  can be influenced by load from other processes on the execution
153  host.""",
154  )
155  )
156  if contact:
157  h_module_timing.GetListOfFunctions().Add(
158  ROOT.TNamed("Contact", contact)
159  )
160  for method in timing_methods:
161  h_module_timing.SetTitle("Module %s Timing" % method_name[method])
162  h_module_timing.GetListOfFunctions().Add(
163  ROOT.TNamed(
164  "Description",
165  """The (average) execution time of the %s method of modules
166  for %s. The error bars show the rms of the time
167  distributions."""
168  % (method_name[method], job_desc),
169  )
170  )
171  index = 1
172  for modstat in modules:
173  h_module_timing.SetBinContent(
174  index, modstat.time_mean(method) * 1e-6
175  )
176  h_module_timing.SetBinError(
177  index, modstat.time_stddev(method) * 1e-6
178  )
179  h_module_timing.GetXaxis().SetBinLabel(index, modstat.name)
180  index += 1
181  h_module_timing.Write("{}{}Timing".format(prefix, method_name[method]))
182  h_module_timing.GetListOfFunctions().RemoveLast()
183 
184  # Memory usage profile
185  memory_profile = ROOT.Belle2.PyStoreObj("VirtualMemoryProfile", 1)
186  if memory_profile:
187  memory_profile.obj().GetListOfFunctions().Add(
188  ROOT.TNamed(
189  "Description",
190  f"The virtual memory usage vs. the event number for {job_desc}.",
191  )
192  )
193  memory_profile.obj().GetListOfFunctions().Add(
194  ROOT.TNamed(
195  "Check",
196  """The virtual memory usage should be flat for high event
197  numbers. If it keeps rising this is an indication of a memory
198  leak.<br>There should also be no significant increases with
199  respect to the reference (or previous revisions if no reference
200  exists).""",
201  )
202  )
203  if contact:
204  memory_profile.obj().GetListOfFunctions().Add(
205  ROOT.TNamed("Contact", contact)
206  )
207  memory_profile.obj().Write(prefix + "VirtualMemoryProfile")
208 
209  # Rss Memory usage profile
210  memory_profile = ROOT.Belle2.PyStoreObj("RssMemoryProfile", 1)
211  if memory_profile:
212  memory_profile.obj().GetListOfFunctions().Add(
213  ROOT.TNamed(
214  "Description",
215  f"The rss memory usage vs. the event number for {job_desc}.",
216  )
217  )
218  memory_profile.obj().GetListOfFunctions().Add(
219  ROOT.TNamed(
220  "Check",
221  """The rss memory usage should be flat for high event numbers.
222  If it keeps rising this is an indication of a memory
223  leak.<br>There should also be no significant increases with
224  respect to the reference (or previous revisions if no reference
225  exists). In the (rare) case that memory is swapped by the OS,
226  the rss memory usage can decrease.""",
227  )
228  )
229  if contact:
230  memory_profile.obj().GetListOfFunctions().Add(
231  ROOT.TNamed("Contact", contact)
232  )
233  memory_profile.obj().Write(prefix + "RssMemoryProfile")
234 
235  # Memory usage per module for the different methods
236  sqrt_n = 1 / math.sqrt(statistics.get_global().calls() - 1)
237  h_module_memory = ROOT.TH1D(
238  prefix + "ModuleMemory",
239  "Virtual Module Memory",
240  len(modules),
241  0,
242  len(modules),
243  )
244  h_module_memory.SetStats(0)
245  h_module_memory.GetXaxis().SetTitle("module")
246  h_module_memory.GetYaxis().SetTitle("memory increase/call [kB]")
247  h_module_memory.GetListOfFunctions().Add(
248  ROOT.TNamed(
249  "Description",
250  f"The (average) increase in virtual memory usage per call of the "
251  f"{method_name[method]} method of modules for {job_desc}.",
252  )
253  )
254  h_module_memory.GetListOfFunctions().Add(
255  ROOT.TNamed(
256  "Check",
257  "The increase in virtual memory usage per call for each module "
258  "should be consistent with zero or the reference.",
259  )
260  )
261  if contact:
262  h_module_memory.GetListOfFunctions().Add(
263  ROOT.TNamed("Contact", contact)
264  )
265  for method in memory_methds:
266  h_module_memory.SetTitle("Module %s Memory" % method_name[method])
267  index = 1
268  for modstat in modules:
269  h_module_memory.SetBinContent(index, modstat.memory_mean(method))
270  h_module_memory.SetBinError(
271  index, modstat.memory_stddev(method) * sqrt_n
272  )
273  h_module_memory.GetXaxis().SetBinLabel(index, modstat.name)
274  index += 1
275  h_module_memory.Write("{}{}Memory".format(prefix, method_name[method]))
276  h_module_memory.GetListOfFunctions().RemoveLast()
277 
278  if plot_file:
279  plot_file.Close()
280  save_dir.cd()
281 
282 
283 def event_timing_plot(
284  data_file,
285  file_name="",
286  max_time=20.0,
287  burn_in=1,
288  contact="",
289  job_desc="",
290  prefix="",
291 ):
292  """
293  Add a validation histogram of event execution time to the given root file.
294  The current root file will be used if file_name is empty (default).
295  The data file has to contain the profile information created by the Profile
296  module.
297  """
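# For illustration only, a hedged usage sketch (file names, contact and
# description are hypothetical; the data file must have been produced with
# the Profile module):
#   event_timing_plot(
#       "ExampleProfile.root",
#       file_name="ExampleValidation.root",
#       contact="someone@example.org",
#       job_desc="the example validation job",
#   )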
298 
299  if not job_desc:
300  job_desc = os.path.basename(sys.argv[0])
301 
302  # Get histogram with time vs event number
303  save_dir = ROOT.gDirectory
304  data = ROOT.TFile.Open(data_file)
305  tree = data.Get("tree")
306  entries = tree.GetEntries()
307  tree.Draw(
308  "Entry$>>hEventTime(%d,-0.5,%d.5)" % (entries, entries - 1),
309  "ProfileInfo.m_timeInSec",
310  "goff",
311  )
312  # load the histogram created by the above Draw command
313  h_event_time = data.Get("hEventTime")
314  h_event_time.SetDirectory(0)
315  data.Close()
316  save_dir.cd()
317 
318  # Open plot file
319  plot_file = None
320  if file_name:
321  plot_file = ROOT.TFile.Open(file_name, "UPDATE")
322 
323  # Create and fill histogram with event execution time distribution
324  stat = ROOT.gStyle.GetOptStat()
325  ROOT.gStyle.SetOptStat(101110)
326  h_timing = ROOT.TH1D(prefix + "Timing", "Event Timing", 100, 0, max_time)
327  h_timing.UseCurrentStyle()
328  h_timing.GetXaxis().SetTitle("time [s]")
329  h_timing.GetYaxis().SetTitle("events")
330  h_timing.GetListOfFunctions().Add(
331  ROOT.TNamed(
332  "Description",
333  f"The distribution of event execution times for {job_desc}.",
334  )
335  )
336  h_timing.GetListOfFunctions().Add(
337  ROOT.TNamed(
338  "Check",
339  "The distribution should be consistent with the reference (or "
340  "previous revisions if no reference exists).",
341  )
342  )
343  if contact:
344  h_timing.GetListOfFunctions().Add(ROOT.TNamed("Contact", contact))
345  for event in range(1 + burn_in, entries + 1):
346  h_timing.Fill(
347  h_event_time.GetBinContent(event)
348  - h_event_time.GetBinContent(event - 1)
349  )
350  h_timing.Write()
351  ROOT.gStyle.SetOptStat(stat)
352 
353  if plot_file:
354  plot_file.Close()
355  save_dir.cd()
356 
357 
358 def draw_progress_bar(delete_lines: int, scripts: List[Script], barlength=50):
359  """
360  This function draws a progress bar for the validation, i.e. it shows
361  which percentage of the scripts have been executed so far.
362  It also shows which scripts are currently running, as well as
363  the total runtime of the validation.
364 
365  @param delete_lines: The number of lines which need to be deleted before
366  we can redraw the progress bar
367  @param scripts: List of all Script objects
368  @param barlength: The length of the progress bar (in characters)
369  @return: The number of lines that were printed by this function call.
370  Useful if this function is called repeatedly.
371  """
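# Typical usage (cf. run_validation below): feed the return value back in as
# delete_lines on the next call, so the bar is redrawn in place:
#   progress_bar_lines = 0
#   while remaining_scripts:
#       ...
#       progress_bar_lines = draw_progress_bar(progress_bar_lines, scripts)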
372 
373  # Get statistics: Number of finished scripts + number of scripts in total
374  finished_scripts = len(
375  [
376  _
377  for _ in scripts
378  if _.status
379  in [
380  ScriptStatus.finished,
381  ScriptStatus.failed,
382  ScriptStatus.skipped,
383  ]
384  ]
385  )
386  all_scripts = len(scripts)
387  percent = 100.0 * finished_scripts / all_scripts
388 
389  # Get the runtime of the script
390  runtime = int(timeit.default_timer() - get_start_time())
391 
392  # Move the cursor up and clear lines
393  for i in range(delete_lines):
394  print("\x1b[2K \x1b[1A", end=" ")
395 
396  # Print the progress bar:
397  progressbar = ""
398  for i in range(barlength):
399  if i < int(barlength * percent / 100.0):
400  progressbar += "="
401  else:
402  progressbar += " "
403  print(
404  f"\x1b[0G[{progressbar}] {percent:6.1f}% "
405  f"({finished_scripts}/{all_scripts})"
406  )
407 
408  # Print the total runtime:
409  print(f"Runtime: {runtime}s")
410 
411  # Print the list of currently running scripts:
412  running = [
413  os.path.basename(__.path)
414  for __ in scripts
415  if __.status == ScriptStatus.running
416  ]
417 
418  # If nothing is currently running
419  if not running:
420  running = ["-"]
421 
422  print(f"Running: {running[0]}")
423  for __ in running[1:]:
424  print("{} {}".format(len("Running:") * " ", __))
425 
426  return len(running) + 2
427 
428 
429 class IntervalSelector:
430  """
431  This can be used to parse the execution intervals of validation scripts
432  and can check whether a script object is in the list of intervals
433  configured in this class.
434  """
435 
436  def __init__(self, intervals):
437  """
438  Initializes the IntervalSelector class with a list of intervals which
439  should be selected
440  """
441 
442  #: stores the intervals which have been selected
443  self.intervals = [x.strip() for x in intervals]
444 
445  def in_interval(self, script_object: Script) -> bool:
446  """
447  checks whether the interval listed in a script object's header is
448  within the selected intervals
449  """
450 
451  return script_object.interval in self.intervals
452 
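# For illustration only, a minimal usage sketch (the interval name "nightly"
# is hypothetical here):
#   selector = IntervalSelector(["nightly"])
#   selector.in_interval(script)  # True if the script header declares "nightly"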
453 
454 
457 
458 
459 # todo: [Ref, low prio, low work] Denote private methods with underscore
460 # /klieret
461 class Validation:
462 
463  """!
464  This is the class that provides all global variables, like 'list_of_files'
465  etc. There is only one instance of this class with the name 'validation'.
466  This allows to use some kind of namespace, i.e. global variables will
467  always be referenced as validation.[name of variable]. This makes it
468  easier to distinguish them from local variables that only exist within
469  the scope of a function or a method.
470 
471  @var tag: The name of the folder within the results directory
472  @var log: Reference to the logging object for this validation instance
473  @var basepaths: The paths to the local and central release directory
474  @var scripts: List of all Script objects for steering files
475  @var packages: List of all packages which contributed scripts
476  @var basf2_options: The options to be given to the basf2 command
477  @var mode: Whether to run locally or on a cluster
478  @var quiet: No progress bar in quiet mode
479  @var dry: Dry runs do not actually start any scripts (for debugging)
480  """
481 
482  def __init__(self, tag="current"):
483  """!
484  The default constructor. Initializes all those variables that will be
485  globally accessible later on. Does not return anything.
486  """
487 
488  # The name which will be used to create a folder in the results
489  # directory. Default is 'current'.
490  self.tag = tag
491 
492  # This dictionary holds the paths to the local and central release dir
493  # (or 'None' if one of them does not exist)
494  self.basepaths = validationpath.get_basepath()
495 
496  # Folder used for the intermediate and final results of the validation
497  self.work_folder = os.path.abspath(os.getcwd())
498 
499  # The logging-object for the validation (Instance of the logging-
500  # module). The method 'create_log()' creates and configures the
501  # actual log.
502  self.log = self.create_log()
503 
504  # The list which holds all steering file objects
505  # (as instances of class Script)
506  self.scripts: List[Script] = []
507 
508  # A list of all packages from which we have collected steering files
509  self.packages: List[str] = []
510 
511  # This is the list of packages which will be ignored by default. This is
512  # only the validation package itself, because it only creates
513  # test-plots for validation development. To see only the
514  # validation-package output, use the --test command line flag
515  self.ignored_packages = ["validation-test"]
516 
517  # Additional arguments for basf2, if we received any from the command
518  # line arguments
519  self.basf2_options = ""
520 
521  # A variable which holds the mode, i.e. 'local' for local
522  # multi-processing and 'cluster' for cluster usage
523  self.mode = None
524 
525  # Defines whether the validation is run in quiet mode, i.e. without
526  # the dynamic progress bar
527  self.quiet = False
528 
529  # Defines if a dry run is performed, i.e. a run where the steering
530  # files are not actually started (for debugging purposes)
531  self.dry = False
532 
533  # If this is set, dependencies will be ignored.
534  self.ignore_dependencies = False
535 
536  #: Reporting interval (in minutes): if a script has still not
537  # finished after this many minutes, a log message is printed
538  # stating that it is still running; the message is repeated
539  # every x minutes
540  self.running_script_reporting_interval = 30
541 
542 
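#: Maximum time in minutes that a single script is allowed to run
# before it is terminated; values <= 0 disable the limit (can be
# overridden via cmd_arguments.max_run_time in execute() below)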
545  self.script_max_runtime_in_minutes = 60 * 5
546 
547 
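#: Maximum number of processes used by the local job control backend
# (passed as max_number_of_processes to localcontrol.Local; set from
# cmd_arguments.parallel in execute() below)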
548  self.parallel = None
549 
550  def get_useable_basepath(self):
551  """
552  Checks if a local path is available. If only a central release is
553  available, return the path to this central release
554  """
555  if self.basepaths["local"]:
556  return self.basepaths["local"]
557  else:
558  return self.basepaths["central"]
559 
560  @staticmethod
561  def get_available_job_control():
562  """
563  insert the possible backend controls, they will be checked via their
564  is_supported method if they actually can be executed in the current
565  environment
566  """
567  return [
568  localcontrol.Local,
569  clustercontrol.Cluster,
570  clustercontrolsge.Cluster,
571  clustercontroldrmaa.Cluster,
572  ]
573 
574  @staticmethod
575  def get_available_job_control_names():
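"""
Return the list of names of the available job control backends.
"""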
576  return [c.name() for c in Validation.get_available_job_control()]
577 
578  def build_dependencies(self):
579  """!
580  This method loops over all Script objects in self.scripts and
581  calls their compute_dependencies()-method.
582  @return: None
583  """
584  for script_object in self.scripts:
585  script_object.compute_dependencies(self.scripts)
586 
587  # Make sure dependent scripts of skipped scripts are skipped, too.
588  for script_object in self.scripts:
589  if script_object.status == ScriptStatus.skipped:
590  self.skip_script(
591  script_object, reason=f"Depends on '{script_object.path}'"
592  )
593 
594  def build_headers(self):
595  """!
596  This method loops over all Script objects in self.scripts and
597  calls their load_header()-method.
598  @return: None
599  """
600  for script_object in self.scripts:
601  script_object.load_header()
602 
603  def skip_script(self, script_object, reason=""):
604  """!
605  This method sets the status of the given script and all dependent ones
606  to 'skipped'.
607  @param script_object: Script object to be skipped.
608  @param reason: Reason for skipping object
609  @return: None
610  """
611  # Print a warning if the status of the script is changed and then
612  # set it to 'skipped'.
613  if script_object.status not in [
614  ScriptStatus.skipped,
615  ScriptStatus.failed,
616  ]:
617  self.log.warning("Skipping " + script_object.path)
618  if reason:
619  self.log.debug(f"Reason for skipping: {reason}.")
620  script_object.status = ScriptStatus.skipped
621 
622  # Also skip all dependent scripts.
623  for dependent_script in self.scripts:
624  if script_object in dependent_script.dependencies:
625  self.skip_script(
626  dependent_script,
627  reason=f"Depends on '{script_object.path}'",
628  )
629 
630  def create_log(self) -> logging.Logger:
631  """!
632  Create the logger.
633  We use the logging module to create an object which allows us to
634  comfortably log everything that happens during the execution of
635  this script and even have different levels of importance, such as
636  'ERROR' or 'DEBUG'.
637  @return: The logging.Logger object
638  """
639  # Create the log and set its default level to DEBUG, which means that
640  # it will store _everything_.
641  log = logging.getLogger("validate_basf2")
642  log.setLevel(logging.DEBUG)
643 
644  # Now we add another custom level 'NOTE'. This is because we don't
645  # want to print ERRORs and WARNINGs to the console output, therefore
646  # we need a higher level.
647  # We define the new level and tell 'log' what to do when we use it
648  logging.NOTE = 100
649  logging.addLevelName(logging.NOTE, "NOTE")
650  log.note = lambda msg, *args: log._log(logging.NOTE, msg, args)
651 
652  # Set up the console handler. The console handler will redirect a
653  # certain subset of all log message (i.e. those with level 'NOTE') to
654  # the command line (stdout), so we know what's going on when we
655  # execute the validation.
656 
657  # Define the handler and its level (=NOTE)
658  console_handler = logging.StreamHandler()
659  console_handler.setLevel(logging.NOTE)
660 
661  # Format the handler. We only need the message, no date/time etc.
662  console_format = logging.Formatter("%(message)s")
663  console_handler.setFormatter(console_format)
664 
665  # Add the console handler to log
666  log.addHandler(console_handler)
667 
668  # Now set up the file handler. The file handler will redirect
669  # _everything_ we log to a logfile so that we have all possible
670  # information available for debugging later.
671 
672  # Make sure the folder for the log file exists
673  log_dir = self.get_log_folder()
674  if not os.path.exists(log_dir):
675  print("Creating " + log_dir)
676  os.makedirs(log_dir)
677 
678  # Define the handler and its level (=DEBUG to get everything)
679  file_handler = logging.FileHandler(
680  os.path.join(log_dir, "validate_basf2.log"), "w+"
681  )
682  file_handler.setLevel(logging.DEBUG)
683 
684  # Format the handler. We want the datetime, the module that produced
685  # the message, the LEVEL of the message and the message itself
686  file_format = logging.Formatter(
687  "%(asctime)s - %(module)s - %(levelname)s - %(message)s",
688  datefmt="%Y-%m-%d %H:%M:%S",
689  )
690  file_handler.setFormatter(file_format)
691 
692  # Add the file handler to log
693  log.addHandler(file_handler)
694  return log
695 
696  def collect_steering_files(self, interval_selector):
697  """!
698  This function will collect all steering files from the local and
699  central release directory.
700  @return: None
701  """
702 
703  # Get all folders that contain steering files, first the local ones
704  validation_folders = get_validation_folders(
705  "local", self.basepaths, self.log
706  )
707 
708  # Then add those central folders that do not have a local match
709  for (package, folder) in get_validation_folders(
710  "central", self.basepaths, self.log
711  ).items():
712  if package not in validation_folders:
713  validation_folders[package] = folder
714 
715  # remove packages which have been explicitly ignored
716  for ignored in self.ignored_packages:
717  if ignored in validation_folders:
718  del validation_folders[ignored]
719 
720  # Now write to self.packages which packages we have collected
721  self.packages = list(validation_folders.keys())
722 
723  # Finally, we collect the steering files from each folder we have
724  # collected:
725  for (package, folder) in validation_folders.items():
726 
727  # Collect only *.C and *.py files
728  c_files = scripts_in_dir(folder, self.log, ".C")
729  py_files = scripts_in_dir(folder, self.log, ".py")
730  for steering_file in c_files + py_files:
731  script = Script(steering_file, package, self.log)
732  script.load_header()
733  # only select this script, if this interval has been selected
734  if (
735  interval_selector.in_interval(script)
736  and not script.noexecute
737  ):
738  self.scripts.append(script)
739 
740  # That's it, now there is a complete list of all steering files on
741  # which we are going to perform the validation in self.scripts
742 
743  def get_log_folder(self):
744  """!
745  Get the log folder for this validation run. The log files of this
746  run (e.g. the lists of failed and skipped scripts) are recorded there
747  """
748  return validationpath.get_results_tag_folder(self.work_folder, self.tag)
749 
750  def log_failed(self):
751  """!
752  This method logs all scripts with property failed into a single file
753  to be read in run_validation_server.py
754  """
755 
756  failed_log_path = os.path.join(
757  self.get_log_folder(), "list_of_failed_scripts.log"
758  )
759  self.log.note(f"Writing list of failed scripts to {failed_log_path}.")
760 
761  # Select only failed scripts
762  failed_scripts = [
763  script
764  for script in self.scripts
765  if script.status == ScriptStatus.failed
766  ]
767 
768  with open(failed_log_path, "w+") as list_failed:
769  # log the name of all failed scripts
770  for script in failed_scripts:
771  list_failed.write(script.path.split("/")[-1] + "\n")
772 
773  def log_skipped(self):
774  """!
775  This method logs all scripts with property skipped into a single file
776  to be read in run_validation_server.py
777  """
778 
779  skipped_log_path = os.path.join(
780  self.get_log_folder(), "list_of_skipped_scripts.log"
781  )
782  self.log.note(f"Writing list of skipped scripts to {skipped_log_path}.")
783 
784  # Select only skipped scripts
785  skipped_scripts = [
786  script
787  for script in self.scripts
788  if script.status == ScriptStatus.skipped
789  ]
790 
791  with open(skipped_log_path, "w+") as list_skipped:
792  # log the name of all skipped scripts
793  for script in skipped_scripts:
794  list_skipped.write(script.path.split("/")[-1] + "\n")
795 
796  def report_on_scripts(self):
797  """!
798  Print a summary about all scripts, especially highlighting
799  skipped and failed scripts.
800  """
801 
802  failed_scripts = [
803  script.package + "/" + script.name
804  for script in self.scripts
805  if script.status == ScriptStatus.failed
806  ]
807  skipped_scripts = [
808  script.package + "/" + script.name
809  for script in self.scripts
810  if script.status == ScriptStatus.skipped
811  ]
812 
813  self.log.note("")
814  self.log.note(
815  terminal_title_line("Summary of script execution", level=0)
816  )
817  self.log.note(f"Total number of scripts: {len(self.scripts)}")
818  self.log.note("")
819  if skipped_scripts:
820  self.log.note(
821  "{}/{} scripts were skipped".format(
822  len(skipped_scripts), len(self.scripts)
823  )
824  )
825  for s in skipped_scripts:
826  self.log.note(f"* {s}")
827  self.log.note("")
828  else:
829  self.log.note("No scripts were skipped. Nice!")
830  self.log.note("")
831 
832  if failed_scripts:
833  self.log.note(
834  "{}/{} scripts failed".format(
835  len(failed_scripts), len(self.scripts)
836  )
837  )
838  for s in failed_scripts:
839  self.log.note(f"* {s}")
840  self.log.note("")
841  else:
842  self.log.note("No scripts failed. Nice!")
843  self.log.note("")
844 
845  print(
846  validationfunctions.congratulator(
847  total=len(self.scripts),
848  failure=len(failed_scripts) + len(skipped_scripts),
849  )
850  )
851 
852  def set_runtime_data(self):
853  """!
854  This method sets runtime property of each script.
855  """
856 
857  run_times = {}
858  path = validationpath.get_results_runtime_file(self.work_folder)
859  with open(path) as runtimes:
860 
861  # Get our data
862  for line in runtimes:
863  run_times[line.split("=")[0].strip()] = line.split("=")[
864  1
865  ].strip()
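# (each line of the runtimes file has the form
# "<script name>=<runtime in seconds>")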
866 
867  # And try to set a property for each script
868  for script in self.scripts:
869  try:
870  script.runtime = float(run_times[script.name])
871  # If we don't have runtime data, then set it to an average of
872  # all runtimes
873  except KeyError:
874  suma = 0.0
875  for dict_key in run_times:
876  suma += float(run_times[dict_key])
877  script.runtime = suma / len(run_times)
878 
879  def get_script_by_name(self, name: str) -> Optional[Script]:
880  """!
881  Return the Script object with the given name, or None if there is no unique match.
882  """
883 
884  l_arr = [s for s in self.scripts if s.name == name]
885  if len(l_arr) == 1:
886  return l_arr[0]
887  else:
888  return None
889 
890  def apply_package_selection(
891  self, selected_packages, ignore_dependencies=False
892  ):
893  """!
894  Only select packages from a specific set of packages, but still
895  honor the dependencies to outside scripts which may exist
896  """
897 
898  to_keep_dependencies = set()
899 
900  # compile the dependencies of selected scripts
901  # todo: won't work for nested dependencies
902  if not ignore_dependencies:
903  for script_obj in self.scripts:
904  if script_obj.package in selected_packages:
905  for dep in script_obj.dependencies:
906  to_keep_dependencies.add(dep.unique_name())
907  # now, remove all scripts from the script list, which are either
908  # not in the selected packages or have a dependency to them
909  self.scripts = [
910  s
911  for s in self.scripts
912  if (s.package in selected_packages)
913  or (s.unique_name() in to_keep_dependencies)
914  ]
915 
916  # Check if some of the selected_packages were not found.
917  packages = {s.package for s in self.scripts}
918  packages_not_found = list(set(selected_packages) - packages)
919  if packages_not_found:
920  msg = (
921  "You asked to select the package(s) {}, but they were not "
922  "found.".format(", ".join(packages_not_found))
923  )
924  self.log.note(msg)
925  self.log.warning(msg)
926 
927  def apply_script_selection(
928  self, script_selection, ignore_dependencies=False
929  ):
930  """!
931  This method will take the validation file name (e.g.
932  "FullTrackingValidation.py"), determine all the scripts it depends on
933  and set the status of these scripts to "waiting". The status of all
934  other scripts will be set to "skipped", which means they will not be
935  executed in the validation run. If ignore_dependencies is True,
936  dependencies will also be set to "skipped".
937  """
938 
939  # change file extension
940  script_selection = [
941  Script.sanitize_file_name(s) for s in script_selection
942  ]
943 
944  scripts_to_enable = set()
945 
946  # find the dependencies of each selected script
947  for script in script_selection:
948  scripts_to_enable.add(script)
949  script_obj = self.get_script_by_name(script)
950 
951  if script_obj is None:
952  self.log.error(
953  f"Script with name {script} cannot be found, skipping for "
954  f"selection"
955  )
956  continue
957 
958  others = script_obj.get_recursive_dependencies(self.scripts)
959  if not ignore_dependencies:
960  scripts_to_enable = scripts_to_enable.union(others)
961 
962  # enable all selections and dependencies
963  for script_obj in self.scripts:
964  if script_obj.name in scripts_to_enable:
965  self.log.warning(
966  f"Enabling script {script_obj.name} because it was "
967  f"selected or a selected script depends on it."
968  )
969  script_obj.status = ScriptStatus.waiting
970  else:
971  self.log.warning(
972  f"Disabling script {script_obj.name} because it was "
973  f"not selected."
974  )
975  script_obj.status = ScriptStatus.skipped
976 
977  # Check if some of the selected scripts were not found.
978  script_names = {Script.sanitize_file_name(s.name) for s in self.scripts}
979  scripts_not_found = set(script_selection) - script_names
980  if scripts_not_found:
981  msg = (
982  "You requested script(s) {}, but they seem to not have "
983  "been found.".format(", ".join(scripts_not_found))
984  )
985  self.log.note(msg)
986  self.log.warning(msg)
987 
988  def apply_script_caching(self):
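"""!
Mark cacheable scripts whose output files already exist as 'cached' and
remove them from the dependencies of other scripts, so that they are
neither executed nor waited for.
"""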
989  cacheable_scripts = [s for s in self.scripts if s.is_cacheable]
990 
991  output_dir_datafiles = validationpath.get_results_tag_folder(
992  self.work_folder, self.tag
993  )
994 
995  for s in cacheable_scripts:
996  # check whether all output files of this script already exist
997  outfiles = s.output_files
998  files_exist = True
999  for of in outfiles:
1000  full_path = os.path.join(output_dir_datafiles, of)
1001  files_exist = files_exist and os.path.isfile(full_path)
1002 
1003  if files_exist:
1004  s.status = ScriptStatus.cached
1005 
1006  # Remove all cached scripts from the dependencies
1007  # of dependent script objects, they will not be
1008  # executed and no one needs to wait for them
1009  for script in self.scripts:
1010  for dep_script in script.dependencies:
1011  # check if the script this one is depending on is
1012  # in cached execution
1013  if dep_script.status == ScriptStatus.cached:
1014  script.dependencies.remove(dep_script)
1015 
1016  def store_run_results_json(self, git_hash):
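"""!
Store the results of this validation run (including the git hash that
was used to execute the validation scripts) as a JSON file in the
results folder of this tag.
"""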
1017 
1018  # Compile a JSON object for each package, listing its script files
1019  # and the number of failed scripts
1020  json_package = []
1021  for p in self.packages:
1022  this_package_scrits = [s for s in self.scripts if s.package == p]
1023  json_scripts = [s.to_json(self.tag) for s in this_package_scrits]
1024 
1025  # count the failed scripts
1026  fail_count = sum(
1027  [s.status == ScriptStatus.failed for s in this_package_scrits]
1028  )
1029  json_package.append(
1030  json_objects.Package(
1031  p, scriptfiles=json_scripts, fail_count=fail_count
1032  )
1033  )
1034 
1035  # todo: assign correct color here
1036  rev = json_objects.Revision(
1037  label=self.tag,
1038  creation_date=datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
1039  creation_timezone=validationfunctions.get_timezone(),
1040  packages=json_package,
1041  git_hash=git_hash,
1042  )
1043  json_objects.dump(
1044  validationpath.get_results_tag_revision_file(
1045  self.work_folder, self.tag
1046  ),
1047  rev,
1048  )
1049 
1050  def add_script(self, script: Script):
1051  """!
1052  Explicitly add a script object. In normal operation, scripts are
1053  auto-discovered but this method is useful for testing
1054  """
1055 
1056  self.scripts.append(script)
1057 
1058  @staticmethod
1059  def sort_scripts(script_list: List[Script]):
1060  """
1061  Sort the list of scripts that have to be processed by runtime,
1062  execute slow scripts first. If no runtime information is available
1063  from the last execution, run the scripts in the validation package
1064  first because they are long running and used as input for other scripts
1065  """
1066  script_list.sort(
1067  key=lambda x: x.runtime or x.package == "validation", reverse=True
1068  )
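# (x.runtime is a float for scripts with known runtimes; otherwise the
# key falls back to the boolean x.package == "validation", so with
# reverse=True the scripts with long known runtimes are scheduled first,
# followed by the validation-package scripts)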
1069 
1070  # todo: if you have to indent by 9 tabs, you know that it's time to refactor /klieret
1071  def run_validation(self):
1072  """!
1073  This method runs the actual validation, i.e. it loops over all
1074  scripts, checks which of them are ready for execution, and runs them.
1075  @return: None
1076  """
1077 
1078  # Use the local execution for all plotting scripts
1079  self.log.note("Initializing local job control for plotting.")
1080  local_control = localcontrol.Local(
1081  max_number_of_processes=self.parallel
1082  )
1083 
1084  # Depending on the selected mode, load either the controls for the
1085  # cluster or for local multi-processing
1086 
1087  self.log.note("Selecting job control for all other jobs.")
1088 
1089  selected_controls = [
1090  c for c in self.get_available_job_control() if c.name() == self.mode
1091  ]
1092 
1093  if len(selected_controls) != 1:
1094  print(f"Selected mode {self.mode} does not exist")
1095  sys.exit(1)
1096 
1097  selected_control = selected_controls[0]
1098 
1099  self.log.note(
1100  "Controller: {} ({})".format(
1101  selected_control.name(), selected_control.description()
1102  )
1103  )
1104 
1105  if not selected_control.is_supported():
1106  print(f"Selected mode {self.mode} is not supported on your system")
1107  sys.exit(1)
1108 
1109  # instantiate the selected job control backend
1110  if selected_control.name() == "local":
1111  control = selected_control(max_number_of_processes=self.parallel)
1112  else:
1113  control = selected_control()
1114 
1115  # read the git hash which is used to produce this validation
1116  src_basepath = self.get_useable_basepath()
1117  git_hash = validationfunctions.get_compact_git_hash(src_basepath)
1118  self.log.debug(
1119  f"Git hash of repository located at {src_basepath} is {git_hash}"
1120  )
1121 
1122  # todo: perhaps we want to have these files in the results folder, don't we? /klieret
1123  # If we do have runtime data, then read them
1124  if (
1125  os.path.exists("./runtimes.dat")
1126  and os.stat("./runtimes.dat").st_size
1127  ):
1128  self.set_runtime_data()
1129  if os.path.exists("./runtimes-old.dat"):
1130  # If there is an old data backup, delete it, we backup only
1131  # one run
1132  os.remove("./runtimes-old.dat")
1133  if self.mode == "local":
1134  # Backup the old data file
1135  shutil.copyfile("./runtimes.dat", "./runtimes-old.dat")
1136 
1137  # Open runtimes log and start logging, but log only if we are
1138  # running in the local mode
1139  if self.mode == "local":
1140  runtimes = open("./runtimes.dat", "w+")
1141 
1142  if not self.quiet:
1143  # This variable is needed for the progress bar function
1144  progress_bar_lines = 0
1145  print()
1146 
1147  # The list of scripts that have to be processed
1148  remaining_scripts = [
1149  script
1150  for script in self.scripts
1151  if script.status == ScriptStatus.waiting
1152  ]
1153 
1154  # Sort the list of scripts that have to be processed by runtime,
1155  # execute slow scripts first
1156  self.sort_scripts(remaining_scripts)
1157 
1158  def handle_finished_script(script_obj: Script):
1159  # Write to log that the script finished
1160  self.log.debug("Finished: " + script_obj.path)
1161 
1162  # If we are running locally, log a runtime
1163  script_obj.runtime = time.time() - script_obj.start_time
1164  if self.mode == "local":
1165  runtimes.write(
1166  script_obj.name + "=" + str(script_obj.runtime) + "\n"
1167  )
1168 
1169  # Check for the return code and set variables accordingly
1170  script_obj.status = ScriptStatus.finished
1171  script_obj.returncode = result[1]
1172  if result[1] != 0:
1173  script_obj.status = ScriptStatus.failed
1174  self.log.warning(
1175  f"exit_status was {result[1]} for {script_obj.path}"
1176  )
1177  script_obj.remove_output_files()
1178 
1179  # Skip all dependent scripts
1180  self.skip_script(
1181  script_obj,
1182  reason="Script '{}' failed and we set its status to "
1183  "skipped so that all dependencies are "
1184  "also skipped.".format(script_obj.path),
1185  )
1186 
1187  else:
1188  # Remove this script from the dependencies of dependent
1189  # script objects
1190  for dependent_script in remaining_scripts:
1191  if script_obj in dependent_script.dependencies:
1192  dependent_script.dependencies.remove(script_obj)
1193 
1194  # Some printout in quiet mode
1195  if self.quiet:
1196  waiting = [
1197  script
1198  for script in remaining_scripts
1199  if script.status == ScriptStatus.waiting
1200  ]
1201  running = [
1202  script
1203  for script in remaining_scripts
1204  if script.status == ScriptStatus.running
1205  ]
1206  print(
1207  "Finished [{},{}]: {} -> {}".format(
1208  len(waiting),
1209  len(running),
1210  script_obj.path,
1211  script_obj.status,
1212  )
1213  )
1214 
1215  def handle_unfinished_script(script_obj: Script):
1216  if (
1217  time.time() - script_obj.last_report_time
1218  ) / 60.0 > self.running_script_reporting_interval:
1219  print(
1220  "Script {} running since {} seconds".format(
1221  script_obj.name_not_sanitized,
1222  time.time() - script_obj.start_time,
1223  )
1224  )
1225  # explicit flush so this will show up in log file right away
1226  sys.stdout.flush()
1227 
1228  # not finished yet, log time
1229  script_obj.last_report_time = time.time()
1230 
1231  # check for the maximum time a script is allow to run and
1232  # terminate if exceeded
1233  total_runtime_in_minutes = (
1234  time.time() - script_obj.start_time
1235  ) / 60.0
1236  if (
1237  total_runtime_in_minutes
1238  > self.script_max_runtime_in_minutes
1239  > 0
1240  ):
1241  script_obj.status = ScriptStatus.failed
1242  self.log.warning(
1243  f"Script {script_obj.path} did not finish after "
1244  f"{total_runtime_in_minutes} minutes, attempting to "
1245  f"terminate. "
1246  )
1247  # kill the running process
1248  script_obj.control.terminate(script_obj)
1249  # Skip all dependent scripts
1250  self.skip_script(
1251  script_obj,
1252  reason=f"Script '{script_obj.path}' did not finish in "
1253  f"time, so we're setting it to 'failed' so that all "
1254  f"dependent scripts will be skipped.",
1255  )
1256 
1257  def handle_waiting_script(script_obj: Script):
1258  # Determine the way of execution depending on whether
1259  # data files are created
1260  if script_obj.output_files:
1261  script_obj.control = control
1262  else:
1263  script_obj.control = local_control
1264 
1265  # Do not spawn processes if there are already too many!
1266  if script_obj.control.available():
1267 
1268  # Write to log which script is being started
1269  self.log.debug("Starting " + script_obj.path)
1270 
1271  # Set script object variables accordingly
1272  if script_obj.status == ScriptStatus.failed:
1273  self.log.warning(f"Starting of {script_obj.path} failed")
1274  else:
1275  script_obj.status = ScriptStatus.running
1276 
1277  # Actually start the script execution
1278  script_obj.control.execute(
1279  script_obj, self.basf2_options, self.dry, self.tag
1280  )
1281 
1282  # Log the script execution start time
1283  script_obj.start_time = time.time()
1284  script_obj.last_report_time = time.time()
1285 
1286  # Some printout in quiet mode
1287  if self.quiet:
1288  waiting = [
1289  _
1290  for _ in remaining_scripts
1291  if _.status == ScriptStatus.waiting
1292  ]
1293  running = [
1294  _
1295  for _ in remaining_scripts
1296  if _.status == ScriptStatus.running
1297  ]
1298  print(
1299  "Started [{},{}]: {}".format(
1300  len(waiting), len(running), script_obj.path
1301  )
1302  )
1303 
1304  # While there are scripts that have not yet been executed...
1305  while remaining_scripts:
1306 
1307  # Loop over all steering files / Script objects
1308  for script_object in remaining_scripts:
1309 
1310  # If the script is currently running
1311  if script_object.status == ScriptStatus.running:
1312 
1313  # Check if the script has finished:
1314  result = script_object.control.is_job_finished(
1315  script_object
1316  )
1317 
1318  # If it has finished:
1319  if result[0]:
1320  handle_finished_script(script_object)
1321  else:
1322  handle_unfinished_script(script_object)
1323 
1324  # Otherwise (the script is waiting) and if it is ready to be
1325  # executed
1326  elif not script_object.dependencies:
1327  handle_waiting_script(script_object)
1328 
1329  # Update the list of scripts that have to be processed
1330  remaining_scripts = [
1331  script
1332  for script in remaining_scripts
1333  if script.status in [ScriptStatus.waiting, ScriptStatus.running]
1334  ]
1335 
1336  # Sort them again, Justin Case
1337  self.sort_scripts(remaining_scripts)
1338 
1339  # Wait for one second before starting again
1340  time.sleep(1)
1341 
1342  # If we are not running in quiet mode, draw the progress bar
1343  if not self.quiet:
1344  progress_bar_lines = draw_progress_bar(
1345  progress_bar_lines, self.scripts
1346  )
1347 
1348  # Log failed and skipped scripts
1349  self.log_failed()
1350  self.log_skipped()
1351 
1352  # And close the runtime data file
1353  if self.mode == "local":
1354  runtimes.close()
1355  print()
1356 
1357  self.store_run_results_json(git_hash)
1358  # todo: update list of available revisions with the current run
1359 
1360  def create_plots(self):
1361  """!
1362  This method prepares the html directory for the plots if necessary
1363  and creates the plots that include the results from this validation.
1364  @return: None
1365  """
1366 
1367  html_folder = validationpath.get_html_folder(self.work_folder)
1368  results_folder = validationpath.get_results_folder(self.work_folder)
1369 
1370  os.makedirs(html_folder, exist_ok=True)
1371 
1372  if not os.path.exists(results_folder):
1373  self.log.error(
1374  f"Folder {results_folder} not found in "
1375  f"the work directory {self.work_folder}, please run "
1376  f"b2validation first"
1377  )
1378 
1379  validationplots.create_plots(force=True, work_folder=self.work_folder)
1380 
1381 
1382 def execute(tag=None, is_test=None):
1383  """!
1384  Parses the command line and executes the full validation suite
1385  :param tag The name that will be used for the current revision.
1386  Default None means automatic.
1387  :param is_test Run in test mode? Default None means that we read this
1388  from the command line arguments (which default to False).
1389  :returns None
1390  """
1391 
1392  # Note: Do not test tag and is_test, but rather cmd_arguments.tag
1393  # and cmd_arguments.is_test!
1394  # Also note that we modify some cmd_arguments below
1395  # (e.g. cmd_arguments.packages is updated if cmd_arguments.test is
1396  # specified).
1397 
1398  # If there is no release of basf2 set up, we can stop the execution
1399  # right here!
1400  if (
1401  os.environ.get("BELLE2_RELEASE_DIR", None) is None
1402  and os.environ.get("BELLE2_LOCAL_DIR", None) is None
1403  ):
1404  sys.exit("Error: No basf2 release set up!")
1405 
1406  # Otherwise we can start the execution. The main part is wrapped in a
1407  # try/except construct to catch keyboard interrupts
1408  # fixme: except instructions make only sense after Validation obj is
1409  # initialized ==> Pull everything until there out of try statement
1410  try:
1411 
1412  # Now we process the command line arguments.
1413  # First of all, we read them in:
1414  cmd_arguments = parse_cmd_line_arguments(
1415  modes=Validation.get_available_job_control_names()
1416  )
1417 
1418  # Overwrite the default settings with the parameters given in the
1419  # method call
1420  if tag is not None:
1421  cmd_arguments.tag = tag
1422  if is_test is not None:
1423  cmd_arguments.test = is_test
1424 
1425  # Create the validation object.
1426  validation = Validation(cmd_arguments.tag)
1427 
1428  # Write to log that we have started the validation process
1429  validation.log.note("Starting validation...")
1430  validation.log.note(
1431  f'Results will be stored in a folder named "{validation.tag}"...'
1432  )
1433  validation.log.note(
1434  "The (full) log file(s) can be found at {}".format(
1435  ", ".join(get_log_file_paths(validation.log))
1436  )
1437  )
1438  validation.log.note(
1439  "Please check these logs when encountering "
1440  "unexpected results, as most of the warnings and "
1441  "errors are not written to stdout/stderr."
1442  )
1443 
1444  # Check if we received additional arguments for basf2
1445  if cmd_arguments.options:
1446  validation.basf2_options = " ".join(cmd_arguments.options)
1447  validation.log.note(
1448  f"Received arguments for basf2: {validation.basf2_options}"
1449  )
1450 
1451  # Check if we are using the cluster or local multiprocessing:
1452  validation.mode = cmd_arguments.mode
1453 
1454  # Set if we have a limit on the maximum number of local processes
1455  validation.parallel = cmd_arguments.parallel
1456 
1457  # Check if we are running in quiet mode (no progress bar)
1458  if cmd_arguments.quiet:
1459  validation.log.note("Running in quiet mode (no progress bar).")
1460  validation.quiet = True
1461 
1462  # Check if we are performing a dry run (don't actually start scripts)
1463  if cmd_arguments.dry:
1464  validation.log.note(
1465  "Performing a dry run; no scripts will be " "started."
1466  )
1467  validation.dry = True
1468 
1469  # If running in test mode, only execute scripts in the validation package
1470  if cmd_arguments.test:
1471  validation.log.note("Running in test mode")
1472  validation.ignored_packages = []
1473  cmd_arguments.packages = ["validation-test"]
1474 
1475  validation.log.note(
1476  "Release Folder: {}".format(validation.basepaths["central"])
1477  )
1478  validation.log.note(
1479  "Local Folder: {}".format(validation.basepaths["local"])
1480  )
1481 
1482  # Now collect the steering files which will be used in this validation.
1483  validation.log.note("Collecting steering files...")
1484  intervals = cmd_arguments.intervals.split(",")
1485  validation.collect_steering_files(IntervalSelector(intervals))
1486 
1487  # Build headers for every script object we have created
1488  validation.log.note("Building headers for Script objects...")
1489  validation.build_headers()
1490 
1491  # Build dependencies for every script object we have created,
1492  # unless we're asked to ignore them.
1493  if not cmd_arguments.select_ignore_dependencies:
1494  validation.log.note("Building dependencies for Script objects...")
1495  validation.build_dependencies()
1496 
1497  if cmd_arguments.packages:
1498  validation.log.note(
1499  "Applying package selection for the following package(s): "
1500  + ", ".join(cmd_arguments.packages)
1501  )
1502  validation.apply_package_selection(cmd_arguments.packages)
1503 
1504  # select only specific scripts, if this option has been set
1505  if cmd_arguments.select:
1506  validation.log.note("Applying selection for validation scripts")
1507  validation.apply_script_selection(
1508  cmd_arguments.select, ignore_dependencies=False
1509  )
1510 
1511  # select only specific scripts and ignore their dependencies if
1512  # option is set
1513  if cmd_arguments.select_ignore_dependencies:
1514  validation.log.note(
1515  "Applying selection for validation scripts, "
1516  "ignoring their dependencies"
1517  )
1518  validation.apply_script_selection(
1519  cmd_arguments.select_ignore_dependencies,
1520  ignore_dependencies=True,
1521  )
1522 
1523  # check if the scripts which are cacheable can be skipped, because
1524  # their output is already available
1525  if cmd_arguments.use_cache:
1526  validation.log.note("Checking for cached script output")
1527  validation.apply_script_caching()
1528 
1529  # Allow to change the maximal run time of the scripts
1530  if cmd_arguments.max_run_time is not None:
1531  if cmd_arguments.max_run_time > 0:
1532  validation.log.note(
1533  f"Setting maximal run time of the steering files "
1534  f"to {cmd_arguments.max_run_time} minutes."
1535  )
1536  else:
1537  validation.log.note(
1538  "Disabling run time limitation of steering files as "
1539  "requested (max run time set to <= 0)."
1540  )
1541  validation.script_max_runtime_in_minutes = (
1542  cmd_arguments.max_run_time
1543  )
1544 
1545  # Start the actual validation
1546  validation.log.note("Starting the validation...")
1547  validation.run_validation()
1548 
1549  # Log that the validation has finished and that we are creating plots
1550  validation.log.note("Validation finished...")
1551  if not validation.dry:
1552  validation.log.note("Start creating plots...")
1553  validation.create_plots()
1554  validation.log.note("Plots have been created...")
1555  # send mails
1556  if cmd_arguments.send_mails:
1557  mails = mail_log.Mails(validation)
1558  validation.log.note("Start sending mails...")
1559  # send mails to all users with failed scripts/comparison
1560  if cmd_arguments.send_mails_mode == "incremental":
1561  incremental = True
1562  elif cmd_arguments.send_mails_mode == "full":
1563  incremental = False
1564  else:
1565  incremental = None
1566  mails.send_all_mails(incremental=incremental)
1567  validation.log.note(
1568  "Save mail data to {}".format(validation.get_log_folder())
1569  )
1570  # save json with data about outgoing mails
1571  mails.write_log()
1572  else:
1573  validation.log.note(
1574  "Skipping plot creation and mailing " "(dry run)..."
1575  )
1576 
1577  validation.report_on_scripts()
1578 
1579  # Log that everything is finished
1580  validation.log.note(
1581  "Validation finished! Total runtime: {}s".format(
1582  int(timeit.default_timer() - get_start_time())
1583  )
1584  )
1585 
1586  if cmd_arguments.view:
1587  # run local webserver
1588  validationserver.run_server(open_site=True)
1589 
1590  except KeyboardInterrupt:
1591  validation.log.note("Validation terminated by user!")