Belle II Software development
validation.py
1#!/usr/bin/env python3
2
3
10
11# basf2 specific imports
12from basf2 import statistics
13from ROOT import PyConfig
14
15PyConfig.IgnoreCommandLineOptions = True # noqa
16import ROOT
17
18# Normal library imports
19import math
20import logging
21import os
22import timeit
23import sys
24import time
25import shutil
26import datetime
27from typing import List, Optional
28
29import json_objects
30import mail_log
31
32# A pretty printer. Prints prettier lists, dicts, etc. :)
33import pprint
34
35from validationscript import Script, ScriptStatus
36from validationfunctions import (
37 get_start_time,
38 get_validation_folders,
39 scripts_in_dir,
40 parse_cmd_line_arguments,
41 get_log_file_paths,
42 terminal_title_line,
43)
44import validationfunctions
45
46import validationserver
47import validationplots
48import validationscript
49import validationpath
50
51# local and cluster control backends
52import localcontrol
53import clustercontrol
54import clustercontrolsge
55import clustercontroldrmaa
56
57
58pp = pprint.PrettyPrinter(depth=6, indent=1, width=80)
59
60
61def statistics_plots(
62 file_name="",
63 timing_methods=None,
64 memory_methods=None,
65 contact="",
66 job_desc="",
67 prefix="",
68):
69 """
70 Add memory usage and execution time validation plots to the given root
71 file. The current root file will be used if file_name is empty
72 (default).
73 """
74
75 if not timing_methods:
76 timing_methods = [statistics.INIT, statistics.EVENT]
77 if not memory_methods:
78 memory_methods = [statistics.INIT, statistics.EVENT]
79
80 # Open plot file
81 save_dir = ROOT.gDirectory
82 plot_file = None
83 if file_name:
84 plot_file = ROOT.TFile.Open(file_name, "UPDATE")
85
86 if not job_desc:
87 job_desc = sys.argv[1]
88
89 # Global timing
90 method_name = {}
91 h_global_timing = ROOT.TH1D(
92 prefix + "GlobalTiming", "Global Timing", 5, 0, 5
93 )
94 h_global_timing.SetStats(0)
95 h_global_timing.GetXaxis().SetTitle("method")
96 h_global_timing.GetYaxis().SetTitle("time/call [ms]")
97 h_global_timing.GetListOfFunctions().Add(
98 ROOT.TNamed(
99 "Description",
100 f"""The (average) time of the different basf2 execution phases
101 for {job_desc}. The error bars show the rms of the time
102 distributions.""",
103 )
104 )
105 h_global_timing.GetListOfFunctions().Add(
106 ROOT.TNamed(
107 "Check",
108 """There should be no significant and persistent increases in
109 the run time of the methods. Only cases where the increase
110 compared to the reference or previous versions persists for at
111 least two consecutive revisions should be reported since the
112 measurements can be influenced by load from other processes on
113 the execution host.""",
114 )
115 )
116 if contact:
117 h_global_timing.GetListOfFunctions().Add(
118 ROOT.TNamed("Contact", contact)
119 )
120 for (index, method) in statistics.StatisticCounters.values.items():
121 method_name[method] = str(method)[0] + str(method).lower()[1:].replace(
122 "_r", "R"
123 )
124 if index == 5:
125 break
126 h_global_timing.SetBinContent(
127 index + 1, statistics.get_global().time_mean(method) * 1e-6
128 )
129 h_global_timing.SetBinError(
130 index + 1, statistics.get_global().time_stddev(method) * 1e-6
131 )
132 h_global_timing.GetXaxis().SetBinLabel(index + 1, method_name[method])
133 h_global_timing.Write()
134
135 # Timing per module for the different methods
136 modules = statistics.modules
137 h_module_timing = ROOT.TH1D(
138 prefix + "ModuleTiming", "Module Timing", len(modules), 0, len(modules)
139 )
140 h_module_timing.SetStats(0)
141 h_module_timing.GetXaxis().SetTitle("module")
142 h_module_timing.GetYaxis().SetTitle("time/call [ms]")
143 h_module_timing.GetListOfFunctions().Add(
144 ROOT.TNamed(
145 "Check",
146 """There should be no significant and persistent increases in
147 the run time of a module. Only cases where the increase compared
148 to the reference or previous versions persists for at least two
149 consecutive revisions should be reported since the measurements
150 can be influenced by load from other processes on the execution
151 host.""",
152 )
153 )
154 if contact:
155 h_module_timing.GetListOfFunctions().Add(
156 ROOT.TNamed("Contact", contact)
157 )
158 for method in timing_methods:
159 h_module_timing.SetTitle(f"Module {method_name[method]} Timing")
160 h_module_timing.GetListOfFunctions().Add(
161 ROOT.TNamed(
162 "Description",
163 f"""The (average) execution time of the {method_name[method]} method of modules
164 for {job_desc}. The error bars show the rms of the time
165 distributions.""",
166 )
167 )
168 index = 1
169 for modstat in modules:
170 h_module_timing.SetBinContent(
171 index, modstat.time_mean(method) * 1e-6
172 )
173 h_module_timing.SetBinError(
174 index, modstat.time_stddev(method) * 1e-6
175 )
176 h_module_timing.GetXaxis().SetBinLabel(index, modstat.name)
177 index += 1
178 h_module_timing.Write(f"{prefix}{method_name[method]}Timing")
179 h_module_timing.GetListOfFunctions().RemoveLast()
180
181 # Memory usage profile
182 memory_profile = ROOT.Belle2.PyStoreObj("VirtualMemoryProfile", 1)
183 if memory_profile:
184 memory_profile.obj().GetListOfFunctions().Add(
185 ROOT.TNamed(
186 "Description",
187 f"The virtual memory usage vs. the event number for {job_desc}.",
188 )
189 )
190 memory_profile.obj().GetListOfFunctions().Add(
191 ROOT.TNamed(
192 "Check",
193 """The virtual memory usage should be flat for high event
194 numbers. If it keeps rising this is an indication of a memory
195 leak.<br>There should also be no significant increases with
196 respect to the reference (or previous revisions if no reference
197 exists).""",
198 )
199 )
200 if contact:
201 memory_profile.obj().GetListOfFunctions().Add(
202 ROOT.TNamed("Contact", contact)
203 )
204 memory_profile.obj().Write(prefix + "VirtualMemoryProfile")
205
206 # Rss Memory usage profile
207 memory_profile = ROOT.Belle2.PyStoreObj("RssMemoryProfile", 1)
208 if memory_profile:
209 memory_profile.obj().GetListOfFunctions().Add(
210 ROOT.TNamed(
211 "Description",
212 f"The rss memory usage vs. the event number for {job_desc}.",
213 )
214 )
215 memory_profile.obj().GetListOfFunctions().Add(
216 ROOT.TNamed(
217 "Check",
218 """The rss memory usage should be flat for high event numbers.
219 If it keeps rising this is an indication of a memory
220 leak.<br>There should also be no significant increases with
221 respect to the reference (or previous revisions if no reference
222 exists). In the (rare) case that memory is swapped by the OS,
223 the rss memory usage can decrease.""",
224 )
225 )
226 if contact:
227 memory_profile.obj().GetListOfFunctions().Add(
228 ROOT.TNamed("Contact", contact)
229 )
230 memory_profile.obj().Write(prefix + "RssMemoryProfile")
231
232 # Memory usage per module for the different methods
233 sqrt_n = 1 / math.sqrt(statistics.get_global().calls() - 1)
234 h_module_memory = ROOT.TH1D(
235 prefix + "ModuleMemory",
236 "Virtual Module Memory",
237 len(modules),
238 0,
239 len(modules),
240 )
241 h_module_memory.SetStats(0)
242 h_module_memory.GetXaxis().SetTitle("module")
243 h_module_memory.GetYaxis().SetTitle("memory increase/call [kB]")
244 h_module_memory.GetListOfFunctions().Add(
245 ROOT.TNamed(
246 "Description",
247 f"The (average) increase in virtual memory usage per call of the "
248 f"{method_name[method]} method of modules for {job_desc}.",
249 )
250 )
251 h_module_memory.GetListOfFunctions().Add(
252 ROOT.TNamed(
253 "Check",
254 "The increase in virtual memory usage per call for each module "
255 "should be consistent with zero or the reference.",
256 )
257 )
258 if contact:
259 h_module_memory.GetListOfFunctions().Add(
260 ROOT.TNamed("Contact", contact)
261 )
262 for method in memory_methods:
263 h_module_memory.SetTitle(f"Module {method_name[method]} Memory")
264 index = 1
265 for modstat in modules:
266 h_module_memory.SetBinContent(index, modstat.memory_mean(method))
267 h_module_memory.SetBinError(
268 index, modstat.memory_stddev(method) * sqrt_n
269 )
270 h_module_memory.GetXaxis().SetBinLabel(index, modstat.name)
271 index += 1
272 h_module_memory.Write(f"{prefix}{method_name[method]}Memory")
273 h_module_memory.GetListOfFunctions().RemoveLast()
274
275 if plot_file:
276 plot_file.Close()
277 save_dir.cd()
278
279
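# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original validation.py): a validation
# steering file would typically call statistics_plots() after basf2.process()
# so that the module statistics are filled. The file name, contact address and
# job description below are hypothetical.
def _example_statistics_plots_usage():
    statistics_plots(
        file_name="ExampleValidation.root",  # histograms are appended ("UPDATE")
        contact="someone@belle2.org",
        job_desc="an example reconstruction job",
        prefix="Example",
    )
# ----------------------------------------------------------------------
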
280def event_timing_plot(
281 data_file,
282 file_name="",
283 max_time=20.0,
284 burn_in=1,
285 contact="",
286 job_desc="",
287 prefix="",
288):
289 """
290 Add a validation histogram of event execution time to the given root file.
291 The current root file will be used if file_name is empty (default).
292 The data file has to contain the profile information created by the Profile
293 module.
294 """
295
296 if not job_desc:
297 job_desc = os.path.basename(sys.argv[0])
298
299 # Get histogram with time vs event number
300 save_dir = ROOT.gDirectory
301 data = ROOT.TFile.Open(data_file)
302 tree = data.Get("tree")
303 entries = tree.GetEntries()
304 tree.Draw(
305 f"Entry$>>hEventTime({int(entries)},-0.5,{int(entries - 1)}.5)",
306 "ProfileInfo.m_timeInSec",
307 "goff",
308 )
309 # load the histogram created by the above Draw command
310 h_event_time = data.Get("hEventTime")
311 h_event_time.SetDirectory(0)
312 data.Close()
313 save_dir.cd()
314
315 # Open plot file
316 plot_file = None
317 if file_name:
318 plot_file = ROOT.TFile.Open(file_name, "UPDATE")
319
320 # Create and fill histogram with event execution time distribution
321 stat = ROOT.gStyle.GetOptStat()
322 ROOT.gStyle.SetOptStat(101110)
323 h_timing = ROOT.TH1D(prefix + "Timing", "Event Timing", 100, 0, max_time)
324 h_timing.UseCurrentStyle()
325 h_timing.GetXaxis().SetTitle("time [s]")
326 h_timing.GetYaxis().SetTitle("events")
327 h_timing.GetListOfFunctions().Add(
328 ROOT.TNamed(
329 "Description",
330 f"The distribution of event execution times for {job_desc}.",
331 )
332 )
333 h_timing.GetListOfFunctions().Add(
334 ROOT.TNamed(
335 "Check",
336 "The distribution should be consistent with the reference (or "
337 "previous revisions if no reference exists).",
338 )
339 )
340 if contact:
341 h_timing.GetListOfFunctions().Add(ROOT.TNamed("Contact", contact))
342 for event in range(1 + burn_in, entries + 1):
343 h_timing.Fill(
344 h_event_time.GetBinContent(event)
345 - h_event_time.GetBinContent(event - 1)
346 )
347 h_timing.Write()
348 ROOT.gStyle.SetOptStat(stat)
349
350 if plot_file:
351 plot_file.Close()
352 save_dir.cd()
353
354
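# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original validation.py): creating the
# event-timing histogram from the output of the Profile module. The file names
# are hypothetical; data_file must contain the "tree" with ProfileInfo entries
# written by the Profile module.
def _example_event_timing_plot_usage():
    event_timing_plot(
        data_file="profile_output.root",
        file_name="ExampleValidation.root",
        max_time=10.0,  # upper edge of the timing histogram in seconds
        burn_in=1,      # skip the first event(s) to avoid initialization bias
        job_desc="an example job",
    )
# ----------------------------------------------------------------------
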
355def draw_progress_bar(delete_lines: int, scripts: List[Script], barlength=50):
356 """
357 This function draws a progress bar for the validation, i.e. it shows what
358 percentage of the scripts have been executed so far.
359 It also shows which scripts are currently running, as well as
360 the total runtime of the validation.
361
362 @param delete_lines: The amount of lines which need to be deleted before
363 we can redraw the progress bar
364 @param scripts: List of all Script objects
365 @param barlength: The length of the progress bar (in characters)
366 @return: The number of lines that were printed by this function call.
367 Useful if this function is called repeatedly.
368 """
369
370 # Get statistics: Number of finished scripts + number of scripts in total
371 finished_scripts = len(
372 [
373 _
374 for _ in scripts
375 if _.status
376 in [
377 ScriptStatus.finished,
378 ScriptStatus.failed,
379 ScriptStatus.skipped,
380 ]
381 ]
382 )
383 all_scripts = len(scripts)
384 percent = 100.0 * finished_scripts / all_scripts
385
386 # Get the runtime of the script
387 runtime = int(timeit.default_timer() - get_start_time())
388
389 # Move the cursor up and clear lines
390 for i in range(delete_lines):
391 print("\x1b[2K \x1b[1A", end=" ")
392
393 # Print the progress bar:
394 progressbar = ""
395 for i in range(barlength):
396 if i < int(barlength * percent / 100.0):
397 progressbar += "="
398 else:
399 progressbar += " "
400 print(
401 f"\x1b[0G[{progressbar}] {percent:6.1f}% "
402 f"({finished_scripts}/{all_scripts})"
403 )
404
405 # Print the total runtime:
406 print(f"Runtime: {runtime}s")
407
408 # Print the list of currently running scripts:
409 running = [
410 os.path.basename(__.path)
411 for __ in scripts
412 if __.status == ScriptStatus.running
413 ]
414
415 # If nothing is currently running
416 if not running:
417 running = ["-"]
418
419 print(f"Running: {running[0]}")
420 for __ in running[1:]:
421 print(f"{len('Running:') * ' '} {__}")
422
423 return len(running) + 2
424
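# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original validation.py): the progress
# bar is meant to be redrawn in a loop, feeding back the number of lines
# printed in the previous iteration so that they can be cleared first (this
# mirrors how Validation.run_validation() uses it below).
def _example_progress_bar_loop(scripts: List[Script]):
    lines = 0
    while any(
        s.status in (ScriptStatus.waiting, ScriptStatus.running)
        for s in scripts
    ):
        lines = draw_progress_bar(lines, scripts)
        time.sleep(1)
# ----------------------------------------------------------------------
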
425
426class IntervalSelector:
427 """
428 This can be used to parse the execution intervals of validation scripts
429 and can check whether a script object is in the list of intervals
430 configured in this class.
431 """
432
433 def __init__(self, intervals):
434 """
435 Initializes the IntervalSelector class with a list of intervals which
436 should be selected
437 """
438
439 #: stores the intervals which have been selected
440 self.intervals = [x.strip() for x in intervals]
441
442 def in_interval(self, script_object: Script) -> bool:
443 """
444 Checks whether the interval listed in a script object's header is
445 within the selected intervals.
446 """
447
448 return script_object.interval in self.intervals
449
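# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original validation.py): keep only the
# scripts whose header declares one of the selected intervals. The interval
# name "nightly" is just an example value.
def _example_interval_selection(scripts: List[Script]) -> List[Script]:
    selector = IntervalSelector(["nightly"])
    return [script for script in scripts if selector.in_interval(script)]
# ----------------------------------------------------------------------
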
450
451
454
455
456# todo: [Ref, low prio, low work] Denote private methods with underscore
457# /klieret
458class Validation:
459
460 """!
461 This is the class that provides all global variables, like 'list_of_files'
462 etc. There is only one instance of this class, named 'validation'. This
463 allows it to be used as a kind of namespace, i.e. global variables will always
464 be referenced as validation.[name of variable]. This makes it easier to
465 distinguish them from local variables that only exist within the scope of a
466 function or a method.
467
468 @var tag: The name of the folder within the results directory
469 @var log: Reference to the logging object for this validation instance
470 @var basepaths: The paths to the local and central release directory
471 @var scripts: List of all Script objects for steering files
472 @var packages: List of all packages which contributed scripts
473 @var basf2_options: The options to be given to the basf2 command
474 @var mode: Whether to run locally or on a cluster
475 @var quiet: No progress bar in quiet mode
476 @var dry: Dry runs do not actually start any scripts (for debugging)
477 """
478
479 def __init__(self, tag="current"):
480 """!
481 The default constructor. Initializes all those variables that will be
482 globally accessible later on. Does not return anything.
483 """
484
485 # The name which will be used to create a folder in the results
486 # directory. Default is 'current'.
487 self.tag = tag
488
489 # This dictionary holds the paths to the local and central release dir
490 # (or 'None' if one of them does not exist)
491 self.basepaths = validationpath.get_basepath()
492
493 # Folder used for the intermediate and final results of the validation
494 self.work_folder = os.path.abspath(os.getcwd())
495
496 # The logging-object for the validation (Instance of the logging-
497 # module). Initialize the log as 'None' and then call the method
498 # 'create_log()' to create the actual log.
499 self.log = self.create_log()
500
501 # The list which holds all steering file objects
502 # (as instances of class Script)
503 self.scripts: List[Script] = []
504
505 # A list of all packages from which we have collected steering files
506 self.packages: List[str] = []
507
508 # This is the list of packages which will be ignored by default. This is
509 # only the validation package itself, because it only creates
510 # test-plots for validation development. To see only the
511 # validation-package output, use the --test command line flag
512 self.ignored_packages = ["validation-test"]
513
514 # Additional arguments for basf2, if we received any from the command
515 # line arguments
516 self.basf2_options = ""
517
518 # A variable which holds the mode, i.e. 'local' for local
519 # multi-processing and 'cluster' for cluster usage
520 self.mode = None
521
522 # Defines whether the validation is run in quiet mode, i.e. without
523 # the dynamic progress bar
524 self.quiet = False
525
526 # Defines if a dry run is performed, i.e. a run where the steering
527 # files are not actually started (for debugging purposes)
528 self.dry = False
529
530 # If this is set, dependencies will be ignored.
531 self.ignore_dependencies = False
532
533 #: Reporting interval (in minutes): the time after which the first
534 # log output appears if a script has still not finished. After that,
535 # the names of the scripts that are still running are printed every
536 # x minutes.
537 self.running_script_reporting_interval = 30
538
539
540 #: The maximum time (in minutes) a single script is allowed to run
541 # before it is terminated; values <= 0 disable the limit
542 self.script_max_runtime_in_minutes = 60 * 5
543
544 #: Maximum number of parallel processes used by the local job control
545 self.parallel = None
546
547 def get_useable_basepath(self):
548 """
549 Checks if a local path is available. If only a central release is
550 available, return the path to this central release
551 """
552 if self.basepaths["local"]:
553 return self.basepaths["local"]
554 else:
555 return self.basepaths["central"]
556
557 @staticmethod
558 def get_available_job_control():
559 """
560 Return the possible job control backends; each is later checked via its
561 is_supported method to determine whether it can actually be used in the
562 current environment
563 """
564 return [
565 localcontrol.Local,
566 clustercontrol.Cluster,
567 clustercontrolsge.Cluster,
568 clustercontroldrmaa.Cluster,
569 ]
570
571 @staticmethod
572 def get_available_job_control_names():
573 return [c.name() for c in Validation.get_available_job_control()]
574
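    # ------------------------------------------------------------------
    # Illustrative sketch (not part of the original validation.py): how a mode
    # string such as "local" or "cluster" can be matched against the available
    # backends; Validation.run_validation() below does essentially the same.
    @staticmethod
    def _example_select_job_control(mode: str):
        matches = [
            c
            for c in Validation.get_available_job_control()
            if c.name() == mode and c.is_supported()
        ]
        return matches[0] if matches else None
    # ------------------------------------------------------------------
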
575 def build_dependencies(self):
576 """!
577 This method loops over all Script objects in self.scripts and
578 calls their compute_dependencies()-method.
579 @return: None
580 """
581 for script_object in self.scripts:
582 script_object.compute_dependencies(self.scripts)
583
584 # Make sure dependent scripts of skipped scripts are skipped, too.
585 for script_object in self.scripts:
586 if script_object.status == ScriptStatus.skipped:
587 self.skip_script(
588 script_object, reason=f"Depends on '{script_object.path}'"
589 )
590
591 def build_headers(self):
592 """!
593 This method loops over all Script objects in self.scripts and
594 calls their load_header()-method.
595 @return: None
596 """
597 for script_object in self.scripts:
598 script_object.load_header()
599
600 def skip_script(self, script_object, reason=""):
601 """!
602 This method sets the status of the given script and all dependent ones
603 to 'skipped'.
604 @param script_object: Script object to be skipped.
605 @param reason: Reason for skipping object
606 @return: None
607 """
608 # Print a warning if the status of the script is changed and then
609 # set it to 'skipped'.
610 if script_object.status not in [
611 ScriptStatus.skipped,
612 ScriptStatus.failed,
613 ]:
614 self.log.warning("Skipping " + script_object.path)
615 if reason:
616 self.log.debug(f"Reason for skipping: {reason}.")
617 script_object.status = ScriptStatus.skipped
618
619 # Also skip all dependent scripts.
620 for dependent_script in self.scripts:
621 if script_object in dependent_script.dependencies:
622 self.skip_script(
623 dependent_script,
624 reason=f"Depends on '{script_object.path}'",
625 )
626
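    # ------------------------------------------------------------------
    # Illustrative sketch (not part of the original validation.py): skipping a
    # script by name; skip_script() then cascades to everything that depends
    # on it. The script name is hypothetical.
    def _example_skip_by_name(self, name: str = "ExampleValidation.py"):
        script = self.get_script_by_name(Script.sanitize_file_name(name))
        if script is not None:
            self.skip_script(script, reason="manually skipped in this example")
    # ------------------------------------------------------------------
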
627 def create_log(self) -> logging.Logger:
628 """!
629 Create the logger.
630 We use the logging module to create an object which allows us to
631 comfortably log everything that happens during the execution of
632 this script and even have different levels of importance, such as
633 'ERROR' or 'DEBUG'.
634 @return: the logging.Logger object
635 """
636 # Create the log and set its default level to DEBUG, which means that
637 # it will store _everything_.
638 log = logging.getLogger("validate_basf2")
639 log.setLevel(logging.DEBUG)
640
641 # Now we add another custom level 'NOTE'. This is because we don't
642 # want to print ERRORs and WARNINGs to the console output, therefore
643 # we need a higher level.
644 # We define the new level and tell 'log' what to do when we use it
645 logging.NOTE = 100
646 logging.addLevelName(logging.NOTE, "NOTE")
647 log.note = lambda msg, *args: log._log(logging.NOTE, msg, args)
648
649 # Set up the console handler. The console handler will redirect a
650 certain subset of all log messages (i.e. those with level 'NOTE') to
651 # the command line (stdout), so we know what's going on when we
652 # execute the validation.
653
654 # Define the handler and its level (=NOTE)
655 console_handler = logging.StreamHandler()
656 console_handler.setLevel(logging.NOTE)
657
658 # Format the handler. We only need the message, no date/time etc.
659 console_format = logging.Formatter("%(message)s")
660 console_handler.setFormatter(console_format)
661
662 # Add the console handler to log
663 log.addHandler(console_handler)
664
665 # Now set up the file handler. The file handler will redirect
666 # _everything_ we log to a logfile so that we have all possible
667 # information available for debugging later.
668
669 # Make sure the folder for the log file exists
670 log_dir = self.get_log_folder()
671 if not os.path.exists(log_dir):
672 print("Creating " + log_dir)
673 os.makedirs(log_dir)
674
675 # Define the handler and its level (=DEBUG to get everything)
676 file_handler = logging.FileHandler(
677 os.path.join(log_dir, "validate_basf2.log"), "w+"
678 )
679 file_handler.setLevel(logging.DEBUG)
680
681 # Format the handler. We want the datetime, the module that produced
682 # the message, the LEVEL of the message and the message itself
683 file_format = logging.Formatter(
684 "%(asctime)s - %(module)s - " "%(levelname)s - %(message)s",
685 datefmt="%Y-%m-%d %H:%M:%S",
686 )
687 file_handler.setFormatter(file_format)
688
689 # Add the file handler to log
690 log.addHandler(file_handler)
691 return log
692
693 def collect_steering_files(self, interval_selector):
694 """!
695 This function will collect all steering files from the local and
696 central release directory.
697 @return: None
698 """
699
700 # Get all folders that contain steering files, first the local ones
701 validation_folders = get_validation_folders(
702 "local", self.basepaths, self.log
703 )
704
705 # Then add those central folders that do not have a local match
706 for (package, folder) in get_validation_folders(
707 "central", self.basepaths, self.log
708 ).items():
709 if package not in validation_folders:
710 validation_folders[package] = folder
711
712 # remove packages which have been explicitly ignored
713 for ignored in self.ignored_packages:
714 if ignored in validation_folders:
715 del validation_folders[ignored]
716
717 # Now write to self.packages which packages we have collected
718 self.packages = list(validation_folders.keys())
719
720 # Finally, we collect the steering files from each folder we have
721 # collected:
722 for (package, folder) in validation_folders.items():
723
724 # Collect only *.C and *.py files
725 c_files = scripts_in_dir(folder, self.log, ".C")
726 py_files = scripts_in_dir(folder, self.log, ".py")
727 for steering_file in c_files + py_files:
728 script = Script(steering_file, package, self.log)
729 script.load_header()
730 # only select this script, if this interval has been selected
731 if (
732 interval_selector.in_interval(script)
733 and not script.noexecute
734 ):
735 self.scripts.append(script)
736
737 # That's it, now there is a complete list of all steering files on
738 # which we are going to perform the validation in self.scripts
739
740 def get_log_folder(self):
741 """!
742 Get the log folder for this validation run. The log files of the
743 executed (successful and failed) scripts will be recorded there
744 """
745 return validationpath.get_results_tag_folder(self.work_folder, self.tag)
746
747 def log_failed(self):
748 """!
749 This method logs all scripts with property failed into a single file
750 to be read in run_validation_server.py
751 """
752
753 failed_log_path = os.path.join(
754 self.get_log_folder(), "list_of_failed_scripts.log"
755 )
756 self.log.note(f"Writing list of failed scripts to {failed_log_path}.")
757
758 # Select only failed scripts
759 failed_scripts = [
760 script
761 for script in self.scripts
762 if script.status == ScriptStatus.failed
763 ]
764
765 with open(failed_log_path, "w+") as list_failed:
766 # log the name of all failed scripts
767 for script in failed_scripts:
768 list_failed.write(script.path.split("/")[-1] + "\n")
769
770 def log_skipped(self):
771 """!
772 This method logs all scripts with property skipped into a single file
773 to be read in run_validation_server.py
774 """
775
776 skipped_log_path = os.path.join(
777 self.get_log_folder(), "list_of_skipped_scripts.log"
778 )
779 self.log.note(f"Writing list of skipped scripts to {skipped_log_path}.")
780
781 # Select only skipped scripts
782 skipped_scripts = [
783 script
784 for script in self.scripts
785 if script.status == ScriptStatus.skipped
786 ]
787
788 with open(skipped_log_path, "w+") as list_skipped:
789 # log the name of all skipped scripts
790 for script in skipped_scripts:
791 list_skipped.write(script.path.split("/")[-1] + "\n")
792
793 def report_on_scripts(self):
794 """!
795 Print a summary about all scripts, especially highlighting
796 skipped and failed scripts.
797 """
798
799 failed_scripts = [
800 script.package + "/" + script.name
801 for script in self.scripts
802 if script.status == ScriptStatus.failed
803 ]
804 skipped_scripts = [
805 script.package + "/" + script.name
806 for script in self.scripts
807 if script.status == ScriptStatus.skipped
808 ]
809
810 self.log.note("")
811 self.log.note(
812 terminal_title_line("Summary of script execution", level=0)
813 )
814 self.log.note(f"Total number of scripts: {len(self.scripts)}")
815 self.log.note("")
816 if skipped_scripts:
817 self.log.note(
818 f"{len(skipped_scripts)}/{len(self.scripts)} scripts were skipped"
819 )
820 for s in skipped_scripts:
821 self.log.note(f"* {s}")
822 self.log.note("")
823 else:
824 self.log.note("No scripts were skipped. Nice!")
825 self.log.note("")
826
827 if failed_scripts:
828 self.log.note(
829 f"{len(failed_scripts)}/{len(self.scripts)} scripts failed"
830 )
831 for s in failed_scripts:
832 self.log.note(f"* {s}")
833 self.log.note("")
834 else:
835 self.log.note("No scripts failed. Nice!")
836 self.log.note("")
837
838 print(
839 validationfunctions.congratulator(
840 total=len(self.scripts),
841 failure=len(failed_scripts) + len(skipped_scripts),
842 )
843 )
844
845 def set_runtime_data(self):
846 """!
847 This method sets the runtime property of each script.
848 """
849
850 run_times = {}
851 path = validationpath.get_results_runtime_file(self.work_folder)
852 with open(path) as runtimes:
853
854 # Get our data
855 for line in runtimes:
856 run_times[line.split("=")[0].strip()] = line.split("=")[
857 1
858 ].strip()
859
860 # And try to set a property for each script
861 for script in self.scripts:
862 try:
863 script.runtime = float(run_times[script.name])
864 # If we don't have runtime data, then set it to an average of
865 # all runtimes
866 except KeyError:
867 suma = 0.0
868 for dict_key in run_times:
869 suma += float(run_times[dict_key])
870 script.runtime = suma / len(run_times)
871
872 def get_script_by_name(self, name: str) -> Optional[Script]:
873 """!
874 Return the Script object with the given name (None if there is no unique match).
875 """
876
877 l_arr = [s for s in self.scripts if s.name == name]
878 if len(l_arr) == 1:
879 return l_arr[0]
880 else:
881 return None
882
883 def apply_package_selection(
884 self, selected_packages, ignore_dependencies=False
885 ):
886 """!
887 Only select packages from a specific set of packages, but still
888 honor the dependencies to outside scripts which may exist
889 """
890
891 to_keep_dependencies = set()
892
893 # compile the dependencies of selected scripts
894 # todo: won't work for nested dependencies
895 if not ignore_dependencies:
896 for script_obj in self.scripts:
897 if script_obj.package in selected_packages:
898 for dep in script_obj.dependencies:
899 to_keep_dependencies.add(dep.unique_name())
900 # now, remove all scripts from the script list, which are either
901 # not in the selected packages or have a dependency to them
902 self.scripts = [
903 s
904 for s in self.scripts
905 if (s.package in selected_packages)
906 or (s.unique_name() in to_keep_dependencies)
907 ]
908
909 # Check if some of the selected_packages were not found.
910 packages = {s.package for s in self.scripts}
911 packages_not_found = list(set(selected_packages) - packages)
912 if packages_not_found:
913 msg = (
914 f"You asked to select the package(s) {', '.join(packages_not_found)}, but they were not found."
915 )
916 self.log.note(msg)
917 self.log.warning(msg)
918
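    # ------------------------------------------------------------------
    # Illustrative sketch (not part of the original validation.py): restrict
    # the run to one package while keeping scripts from other packages that
    # the selected scripts depend on. The package name is hypothetical.
    def _example_select_one_package(self):
        self.apply_package_selection(["tracking"], ignore_dependencies=False)
    # ------------------------------------------------------------------
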
919 def apply_script_selection(
920 self, script_selection, ignore_dependencies=False
921 ):
922 """!
923 This method will take the validation file name (e.g.
924 "FullTrackingValidation.py"), determine all the scripts it depends on
925 and set the status of these scripts to "waiting". The status of all
926 other scripts will be set to "skipped", which means they will not be
927 executed in the validation run. If ignore_dependencies is True,
928 dependencies will also be set to "skipped".
929 """
930
931 # change file extension
932 script_selection = [
933 Script.sanitize_file_name(s) for s in script_selection
934 ]
935
936 scripts_to_enable = set()
937
938 # find the dependencies of each selected script
939 for script in script_selection:
940 scripts_to_enable.add(script)
941 script_obj = self.get_script_by_name(script)
942
943 if script_obj is None:
944 self.log.error(
945 f"Script with name {script} cannot be found, skipping for "
946 f"selection"
947 )
948 continue
949
950 others = script_obj.get_recursive_dependencies(self.scripts)
951 if not ignore_dependencies:
952 scripts_to_enable = scripts_to_enable.union(others)
953
954 # enable all selections and dependencies
955 for script_obj in self.scripts:
956 if script_obj.name in scripts_to_enable:
957 self.log.warning(
958 f"Enabling script {script_obj.name} because it was "
959 f"selected or a selected script depends on it."
960 )
961 script_obj.status = ScriptStatus.waiting
962 else:
963 self.log.warning(
964 f"Disabling script {script_obj.name} because it was "
965 f"not selected."
966 )
967 script_obj.status = ScriptStatus.skipped
968
969 # Check if some of the selected_packages were not found.
970 script_names = {Script.sanitize_file_name(s.name) for s in self.scripts}
971 scripts_not_found = set(script_selection) - script_names
972 if scripts_not_found:
973 msg = (
974 f"You requested script(s) {', '.join(scripts_not_found)}, but they could not be found."
975 )
976 self.log.note(msg)
977 self.log.warning(msg)
978
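    # ------------------------------------------------------------------
    # Illustrative sketch (not part of the original validation.py): run a
    # single steering file plus everything it depends on; all other scripts
    # are set to 'skipped'.
    def _example_select_one_script(self):
        self.apply_script_selection(
            ["FullTrackingValidation.py"], ignore_dependencies=False
        )
    # ------------------------------------------------------------------
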
979 def apply_script_caching(self):
980 cacheable_scripts = [s for s in self.scripts if s.is_cacheable]
981
982 output_dir_datafiles = validationpath.get_results_tag_folder(
983 self.work_folder, self.tag
984 )
985
986 for s in cacheable_scripts:
987 # check whether all output files already exist
988 outfiles = s.output_files
989 files_exist = True
990 for of in outfiles:
991 full_path = os.path.join(output_dir_datafiles, of)
992 files_exist = files_exist and os.path.isfile(full_path)
993
994 if files_exist:
995 s.status = ScriptStatus.cached
996
997 # Remove all cached scripts from the dependencies
998 # of dependent script objects, they will not be
999 # executed and no one needs to wait for them
1000 for script in self.scripts:
1001 for dep_script in script.dependencies:
1002 # check if the script this one is depending on is
1003 # in cached execution
1004 if dep_script.status == ScriptStatus.cached:
1005 script.dependencies.remove(dep_script)
1006
1007 def store_run_results_json(self, git_hash):
1008
1009 # git_hash: the git hash which was used for executing these
1010 # validation scripts
1011 json_package = []
1012 for p in self.packages:
1013 this_package_scripts = [s for s in self.scripts if s.package == p]
1014 json_scripts = [s.to_json(self.tag) for s in this_package_scripts]
1015
1016 # count the failed scripts
1017 fail_count = sum(
1018 [s.status == ScriptStatus.failed for s in this_package_scripts]
1019 )
1020 json_package.append(
1021 json_objects.Package(
1022 p, scriptfiles=json_scripts, fail_count=fail_count
1023 )
1024 )
1025
1026 # todo: assign correct color here
1027 rev = json_objects.Revision(
1028 label=self.tag,
1029 creation_date=datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
1030 creation_timezone=validationfunctions.get_timezone(),
1031 packages=json_package,
1032 git_hash=git_hash,
1033 )
1034 json_objects.dump(
1035 validationpath.get_results_tag_revision_file(
1036 self.work_folder, self.tag
1037 ),
1038 rev,
1039 )
1040
1041 def add_script(self, script: Script):
1042 """!
1043 Explicitly add a script object. In normal operation, scripts are
1044 auto-discovered but this method is useful for testing
1045 """
1046
1047 self.scripts.append(script)
1048
1049 @staticmethod
1050 def sort_scripts(script_list: List[Script]):
1051 """
1052 Sort the list of scripts that have to be processed by runtime,
1053 so that slow scripts are executed first. If no runtime information is
1054 available from the last execution, the scripts in the validation package
1055 are run first because they are long running and used as input for other scripts.
1056 """
1057 script_list.sort(
1058 key=lambda x: x.runtime or x.package == "validation", reverse=True
1059 )
1060
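    # ------------------------------------------------------------------
    # Illustrative sketch (not part of the original validation.py): after
    # sorting, scripts with the largest recorded runtime come first, and
    # validation-package scripts without runtime data are also preferred.
    @staticmethod
    def _example_sorted_script_names(script_list: List[Script]) -> List[str]:
        Validation.sort_scripts(script_list)
        return [script.name for script in script_list]
    # ------------------------------------------------------------------
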
1061 def set_tag(self, tag):
1062 """
1063 Update the validation tag to enable utils to fetch revision-wise files
1064 from the same validation instance.
1065 """
1066 self.tag = tag
1067
1068 # todo: if you have to indent by 9 tabs, you know that it's time to refactor /klieret
1069 def run_validation(self):
1070 """!
1071 This method runs the actual validation, i.e. it loops over all
1072 scripts, checks which of them are ready for execution, and runs them.
1073 @return: None
1074 """
1075
1076 # Use the local execution for all plotting scripts
1077 self.log.note("Initializing local job control for plotting.")
1078 local_control = localcontrol.Local(
1079 max_number_of_processes=self.parallel
1080 )
1081
1082 # Depending on the selected mode, load either the controls for the
1083 # cluster or for local multi-processing
1084
1085 self.log.note("Selecting job control for all other jobs.")
1086
1087 selected_controls = [
1088 c for c in self.get_available_job_control() if c.name() == self.mode
1089 ]
1090
1091 if not len(selected_controls) == 1:
1092 print(f"Selected mode {self.mode} does not exist")
1093 sys.exit(1)
1094
1095 selected_control = selected_controls[0]
1096
1097 self.log.note(
1098 f"Controller: {selected_control.name()} ({selected_control.description()})"
1099 )
1100
1101 if not selected_control.is_supported():
1102 print(f"Selected mode {self.mode} is not supported on your system")
1103 sys.exit(1)
1104
1105 # instantiate the selected job control backend
1106 if selected_control.name() == "local":
1107 control = selected_control(max_number_of_processes=self.parallel)
1108 else:
1109 control = selected_control()
1110
1111 # read the git hash which is used to produce this validation
1112 src_basepath = self.get_useable_basepath()
1113 git_hash = validationfunctions.get_compact_git_hash(src_basepath)
1114 self.log.debug(
1115 f"Git hash of repository located at {src_basepath} is {git_hash}"
1116 )
1117
1118 # todo: perhaps we want to have these files in the results folder, don't we? /klieret
1119 # If we do have runtime data, then read them
1120 if (
1121 os.path.exists("./runtimes.dat")
1122 and os.stat("./runtimes.dat").st_size
1123 ):
1124 self.set_runtime_data()
1125 if os.path.exists("./runtimes-old.dat"):
1126 # If there is an old data backup, delete it, we backup only
1127 # one run
1128 os.remove("./runtimes-old.dat")
1129 if self.mode == "local":
1130 # Backup the old data file
1131 shutil.copyfile("./runtimes.dat", "./runtimes-old.dat")
1132
1133 # Open runtimes log and start logging, but log only if we are
1134 # running in the local mode
1135 if self.mode == "local":
1136 runtimes = open("./runtimes.dat", "w+")
1137
1138 if not self.quiet:
1139 # This variable is needed for the progress bar function
1140 progress_bar_lines = 0
1141 print()
1142
1143 # The list of scripts that have to be processed
1144 remaining_scripts = [
1145 script
1146 for script in self.scripts
1147 if script.status == ScriptStatus.waiting
1148 ]
1149
1150 # Sort the list of scripts that have to be processed by runtime,
1151 # execute slow scripts first
1152 self.sort_scripts(remaining_scripts)
1153
1154 def handle_finished_script(script_obj: Script):
1155 # Write to log that the script finished
1156 self.log.debug("Finished: " + script_obj.path)
1157
1158 # If we are running locally, log a runtime
1159 script_obj.runtime = time.time() - script_obj.start_time
1160 if self.mode == "local":
1161 runtimes.write(
1162 script_obj.name + "=" + str(script_obj.runtime) + "\n"
1163 )
1164
1165 # Check for the return code and set variables accordingly
1166 script_obj.status = ScriptStatus.finished
1167 script_obj.returncode = result[1]
1168 if result[1] != 0:
1169 script_obj.status = ScriptStatus.failed
1170 self.log.warning(
1171 f"exit_status was {result[1]} for {script_obj.path}"
1172 )
1173 script_obj.remove_output_files()
1174
1175 # Skip all dependent scripts
1176 self.skip_script(
1177 script_obj,
1178 reason=f"Script '{script_obj.path}' failed and we set its status to skipped so that all dependencies " +
1179 "are also skipped.",
1180 )
1181
1182 else:
1183 # Remove this script from the dependencies of dependent
1184 # script objects
1185 for dependent_script in remaining_scripts:
1186 if script_obj in dependent_script.dependencies:
1187 dependent_script.dependencies.remove(script_obj)
1188
1189 # Some printout in quiet mode
1190 if self.quiet:
1191 waiting = [
1192 script
1193 for script in remaining_scripts
1194 if script.status == ScriptStatus.waiting
1195 ]
1196 running = [
1197 script
1198 for script in remaining_scripts
1199 if script.status == ScriptStatus.running
1200 ]
1201 print(
1202 f"Finished [{len(waiting)},{len(running)}]: {script_obj.path} -> {script_obj.status}"
1203 )
1204
1205 def handle_unfinished_script(script_obj: Script):
1206 if (
1207 time.time() - script_obj.last_report_time
1208 ) / 60.0 > self.running_script_reporting_interval:
1209 print(
1210 f"Script {script_obj.name_not_sanitized} running since {time.time() - script_obj.start_time} seconds"
1211 )
1212 # explicit flush so this will show up in log file right away
1213 sys.stdout.flush()
1214
1215 # not finished yet, log time
1216 script_obj.last_report_time = time.time()
1217
1218 # check for the maximum time a script is allow to run and
1219 # terminate if exceeded
1220 total_runtime_in_minutes = (
1221 time.time() - script_obj.start_time
1222 ) / 60.0
1223 if (
1224 total_runtime_in_minutes
1225 > self.script_max_runtime_in_minutes
1226 > 0
1227 ):
1228 script_obj.status = ScriptStatus.failed
1229 self.log.warning(
1230 f"Script {script_obj.path} did not finish after "
1231 f"{total_runtime_in_minutes} minutes, attempting to "
1232 f"terminate. "
1233 )
1234 # kill the running process
1235 script_obj.control.terminate(script_obj)
1236 # Skip all dependent scripts
1237 self.skip_script(
1238 script_obj,
1239 reason=f"Script '{script_obj.path}' did not finish in "
1240 f"time, so we're setting it to 'failed' so that all "
1241 f"dependent scripts will be skipped.",
1242 )
1243
1244 def handle_waiting_script(script_obj: Script):
1245 # Determine the way of execution depending on whether
1246 # data files are created
1247 if script_obj.output_files:
1248 script_obj.control = control
1249 else:
1250 script_obj.control = local_control
1251
1252 # Do not spawn processes if there are already too many!
1253 if script_obj.control.available():
1254
1255 # Write to log which script is being started
1256 self.log.debug("Starting " + script_obj.path)
1257
1258 # Set script object variables accordingly
1259 if script_obj.status == ScriptStatus.failed:
1260 self.log.warning(f"Starting of {script_obj.path} failed")
1261 else:
1262 script_obj.status = ScriptStatus.running
1263
1264 # Actually start the script execution
1265 script_obj.control.execute(
1266 script_obj, self.basf2_options, self.dry, self.tag
1267 )
1268
1269 # Log the script execution start time
1270 script_obj.start_time = time.time()
1271 script_obj.last_report_time = time.time()
1272
1273 # Some printout in quiet mode
1274 if self.quiet:
1275 waiting = [
1276 _
1277 for _ in remaining_scripts
1278 if _.status == ScriptStatus.waiting
1279 ]
1280 running = [
1281 _
1282 for _ in remaining_scripts
1283 if _.status == ScriptStatus.running
1284 ]
1285 print(
1286 f"Started [{len(waiting)},{len(running)}]: {script_obj.path}"
1287 )
1288
1289 # While there are scripts that have not yet been executed...
1290 while remaining_scripts:
1291
1292 # Loop over all steering files / Script objects
1293 for script_object in remaining_scripts:
1294
1295 # If the script is currently running
1296 if script_object.status == ScriptStatus.running:
1297
1298 # Check if the script has finished:
1299 result = script_object.control.is_job_finished(
1300 script_object
1301 )
1302
1303 # If it has finished:
1304 if result[0]:
1305 handle_finished_script(script_object)
1306 else:
1307 handle_unfinished_script(script_object)
1308
1309 # Otherwise (the script is waiting) and if it is ready to be
1310 # executed
1311 elif not script_object.dependencies:
1312 handle_waiting_script(script_object)
1313
1314 # Update the list of scripts that have to be processed
1315 remaining_scripts = [
1316 script
1317 for script in remaining_scripts
1318 if script.status in [ScriptStatus.waiting, ScriptStatus.running]
1319 ]
1320
1321 # Sort them again, Justin Case
1322 self.sort_scripts(remaining_scripts)
1323
1324 # Wait for one second before starting again
1325 time.sleep(1)
1326
1327 # If we are not running in quiet mode, draw the progress bar
1328 if not self.quiet:
1329 progress_bar_lines = draw_progress_bar(
1330 progress_bar_lines, self.scripts
1331 )
1332
1333 # Log failed and skipped scripts
1334 self.log_failed()
1335 self.log_skipped()
1336
1337 # And close the runtime data file
1338 if self.mode == "local":
1339 runtimes.close()
1340 print()
1341
1342 self.store_run_results_json(git_hash)
1343 # todo: update list of available revisions with the current run
1344
1345 def create_plots(self):
1346 """!
1347 This method prepares the html directory for the plots if necessary
1348 and creates the plots that include the results from this validation.
1349 @return: None
1350 """
1351
1352 html_folder = validationpath.get_html_folder(self.work_folder)
1353 results_folder = validationpath.get_results_folder(self.work_folder)
1354
1355 os.makedirs(html_folder, exist_ok=True)
1356
1357 if not os.path.exists(results_folder):
1358 self.log.error(
1359 f"Folder {results_folder} not found in "
1360 f"the work directory {self.work_folder}, please run "
1361 f"b2validation first"
1362 )
1363
1364 validationplots.create_plots(force=True, work_folder=self.work_folder)
1365
1366 def save_metadata(self):
1367 """!
1368 This method fetches the metadata of all the data files produced
1369 during the validation run and saves them in individual text files of the
1370 same name (with .txt appended at the end) inside the results folder.
1371 @return: None
1372 """
1373
1374 result_folder = self.get_log_folder()
1375
1376 if not os.path.exists(result_folder):
1377 self.log.error(
1378 f"Folder {result_folder} not found in "
1379 f"the work directory {self.work_folder}, please run "
1380 f"b2validation first"
1381 )
1382
1383 # Loop through all the *.root files in the current result
1384 for file in os.listdir(result_folder):
1385 if file.endswith(".root"):
1386 metadata = validationfunctions.get_file_metadata(
1387 os.path.join(result_folder, file))
1388 if metadata:
1389 metadata_file = os.path.join(result_folder, f'{file}.txt')
1390 # Remove old metadata file if it exists
1391 try:
1392 os.remove(metadata_file)
1393 except FileNotFoundError:
1394 pass
1395 with open(metadata_file, 'a') as out:
1396 out.write(f'{metadata}\n')
1397 else:
1398 self.log.debug(f"b2file-metadata-show failed for {file}.")
1399
1400
1401def execute(tag=None, is_test=None):
1402 """!
1403 Parses the command line and executes the full validation suite
1404 :param tag: The name that will be used for the current revision.
1405 Default None means automatic.
1406 :param is_test: Run in test mode? Default None means that we read this
1407 from the command line arguments (which default to False).
1408 :returns: None
1409 """
1410
1411 # Note: Do not test tag and is_test, but rather cmd_arguments.tag
1412 # and cmd_arguments.is_test!
1413 # Also note that we modify some cmd_arguments below
1414 # (e.g. cmd_arguments.packages is updated if cmd_arguments.test is
1415 # specified).
1416
1417 # If there is no release of basf2 set up, we can stop the execution
1418 # right here!
1419 if (
1420 os.environ.get("BELLE2_RELEASE_DIR", None) is None
1421 and os.environ.get("BELLE2_LOCAL_DIR", None) is None
1422 ):
1423 sys.exit("Error: No basf2 release set up!")
1424
1425 # Otherwise we can start the execution. The main part is wrapped in a
1426 # try/except-construct to fetch keyboard interrupts
1427 # fixme: except instructions make only sense after Validation obj is
1428 # initialized ==> Pull everything until there out of try statement
1429 try:
1430
1431 # Now we process the command line arguments.
1432 # First of all, we read them in:
1433 cmd_arguments = parse_cmd_line_arguments(
1434 modes=Validation.get_available_job_control_names()
1435 )
1436
1437 # overwrite the default settings with the parameters given in the
1438 # method call
1439 if tag is not None:
1440 cmd_arguments.tag = tag
1441 if is_test is not None:
1442 cmd_arguments.test = is_test
1443
1444 # Create the validation object.
1445 validation = Validation(cmd_arguments.tag)
1446
1447 # Write to log that we have started the validation process
1448 validation.log.note("Starting validation...")
1449 validation.log.note(
1450 f'Results will be stored in a folder named "{validation.tag}"...'
1451 )
1452 validation.log.note(
1453 f"The (full) log file(s) can be found at {', '.join(get_log_file_paths(validation.log))}"
1454 )
1455 validation.log.note(
1456 "Please check these logs when encountering "
1457 "unexpected results, as most of the warnings and "
1458 "errors are not written to stdout/stderr."
1459 )
1460
1461 # Check if we received additional arguments for basf2
1462 if cmd_arguments.options:
1463 validation.basf2_options = " ".join(cmd_arguments.options)
1464 validation.log.note(
1465 f"Received arguments for basf2: {validation.basf2_options}"
1466 )
1467
1468 # Check if we are using the cluster or local multiprocessing:
1469 validation.mode = cmd_arguments.mode
1470
1471 # Set if we have a limit on the maximum number of local processes
1472 validation.parallel = cmd_arguments.parallel
1473
1474 # Check if we are running in quiet mode (no progress bar)
1475 if cmd_arguments.quiet:
1476 validation.log.note("Running in quiet mode (no progress bar).")
1477 validation.quiet = True
1478
1479 # Check if we are performing a dry run (don't actually start scripts)
1480 if cmd_arguments.dry:
1481 validation.log.note(
1482 "Performing a dry run; no scripts will be " "started."
1483 )
1484 validation.dry = True
1485
1486 # If running in test mode, only execute scripts in validation package
1487 if cmd_arguments.test:
1488 validation.log.note("Running in test mode")
1489 validation.ignored_packages = []
1490 cmd_arguments.packages = ["validation-test"]
1491
1492 validation.log.note(
1493 f"Release Folder: {validation.basepaths['central']}"
1494 )
1495 validation.log.note(
1496 f"Local Folder: {validation.basepaths['local']}"
1497 )
1498
1499 # Now collect the steering files which will be used in this validation.
1500 validation.log.note("Collecting steering files...")
1501 intervals = cmd_arguments.intervals.split(",")
1502 validation.collect_steering_files(IntervalSelector(intervals))
1503
1504 # Build headers for every script object we have created
1505 validation.log.note("Building headers for Script objects...")
1506 validation.build_headers()
1507
1508 # Build dependencies for every script object we have created,
1509 # unless we're asked to ignore them.
1510 if not cmd_arguments.select_ignore_dependencies:
1511 validation.log.note("Building dependencies for Script objects...")
1512 validation.build_dependencies()
1513
1514 if cmd_arguments.packages:
1515 validation.log.note(
1516 "Applying package selection for the following package(s): "
1517 + ", ".join(cmd_arguments.packages)
1518 )
1519 validation.apply_package_selection(cmd_arguments.packages)
1520
1521 # select only specific scripts, if this option has been set
1522 if cmd_arguments.select:
1523 validation.log.note("Applying selection for validation scripts")
1524 validation.apply_script_selection(
1525 cmd_arguments.select, ignore_dependencies=False
1526 )
1527
1528 # select only specific scripts and ignore their dependencies if
1529 # option is set
1530 if cmd_arguments.select_ignore_dependencies:
1531 validation.log.note(
1532 "Applying selection for validation scripts, "
1533 "ignoring their dependencies"
1534 )
1535 validation.apply_script_selection(
1536 cmd_arguments.select_ignore_dependencies,
1537 ignore_dependencies=True,
1538 )
1539
1540 # check if the scripts which are cacheable can be skipped, because
1541 # their output is already available
1542 if cmd_arguments.use_cache:
1543 validation.log.note("Checking for cached script output")
1544 validation.apply_script_caching()
1545
1546 # Allow to change the maximal run time of the scripts
1547 if cmd_arguments.max_run_time is not None:
1548 if cmd_arguments.max_run_time > 0:
1549 validation.log.note(
1550 f"Setting maximal run time of the steering files "
1551 f"to {cmd_arguments.max_run_time} minutes."
1552 )
1553 else:
1554 validation.log.note(
1555 "Disabling run time limitation of steering files as "
1556 "requested (max run time set to <= 0)."
1557 )
1558 validation.script_max_runtime_in_minutes = (
1559 cmd_arguments.max_run_time
1560 )
1561
1562 # Start the actual validation
1563 validation.log.note("Starting the validation...")
1564 validation.run_validation()
1565
1566 # Log that the validation has finished and that we are creating plots
1567 validation.log.note("Validation finished...")
1568 if not validation.dry:
1569 validation.log.note("Start creating plots...")
1570 validation.create_plots()
1571 validation.log.note("Plots have been created...")
1572 else:
1573 validation.log.note(
1574 "Skipping plot creation " "(dry run)..."
1575 )
1576 # send mails
1577 if cmd_arguments.send_mails:
1578 mails = mail_log.Mails(validation)
1579 validation.log.note("Start sending mails...")
1580 # send mails to all users with failed scripts/comparison
1581 if cmd_arguments.send_mails_mode == "incremental":
1582 incremental = True
1583 elif cmd_arguments.send_mails_mode == "full":
1584 incremental = False
1585 else:
1586 incremental = None
1587 mails.send_all_mails(incremental=incremental)
1588 validation.log.note(
1589 f"Save mail data to {validation.get_log_folder()}"
1590 )
1591 # save json with data about outgoing mails
1592 mails.write_log()
1593 validation.report_on_scripts()
1594
1595 validation.save_metadata()
1596
1597 # Log that everything is finished
1598 validation.log.note(
1599 f"Validation finished! Total runtime: {int(timeit.default_timer() - get_start_time())}s"
1600 )
1601
1602 if cmd_arguments.view:
1603 # run local webserver
1604 validationserver.run_server(open_site=True)
1605
1606 except KeyboardInterrupt:
1607 validation.log.note("Validation terminated by user!")
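
# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original validation.py): a thin
# command-line wrapper (for example the b2validation tool) is expected to do
# little more than call execute(); argument parsing happens inside it via
# parse_cmd_line_arguments().
def _example_main():
    execute(tag=None, is_test=None)  # None: take both values from the command line
# ----------------------------------------------------------------------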