Belle II Software development
validation.py
1#!/usr/bin/env python3
2
3
10
11# basf2 specific imports
12from basf2 import statistics
13from ROOT import PyConfig
14
15PyConfig.IgnoreCommandLineOptions = True # noqa
16import ROOT
17
18# Normal library imports
19import math
20import logging
21import os
22import timeit
23import sys
24import time
25import shutil
26import datetime
27from typing import List, Optional
28
29import json_objects
30import mail_log
31
32# A pretty printer. Prints prettier lists, dicts, etc. :)
33import pprint
34
35from validationscript import Script, ScriptStatus
36from validationfunctions import (
37 get_start_time,
38 get_validation_folders,
39 scripts_in_dir,
40 parse_cmd_line_arguments,
41 get_log_file_paths,
42 terminal_title_line,
43)
44import validationfunctions
45
46import validationserver
47import validationplots
48import validationscript
49import validationpath
50
51# local and cluster control backends
52import localcontrol
53import clustercontrol
54import clustercontrolsge
55import clustercontroldrmaa
56
57
58pp = pprint.PrettyPrinter(depth=6, indent=1, width=80)
59
60
61def statistics_plots(
62 file_name="",
63 timing_methods=None,
 64 memory_methods=None,
65 contact="",
66 job_desc="",
67 prefix="",
68):
69 """
 70 Add memory usage and execution time validation plots to the given ROOT
 71 file. The current ROOT file is used if file_name is empty (the
 72 default).
73 """
74
75 if not timing_methods:
76 timing_methods = [statistics.INIT, statistics.EVENT]
 77 if not memory_methods:
 78 memory_methods = [statistics.INIT, statistics.EVENT]
79
80 # Open plot file
81 save_dir = ROOT.gDirectory
82 plot_file = None
83 if file_name:
84 plot_file = ROOT.TFile.Open(file_name, "UPDATE")
85
86 if not job_desc:
87 job_desc = sys.argv[1]
88
89 # Global timing
90 method_name = {}
91 h_global_timing = ROOT.TH1D(
92 prefix + "GlobalTiming", "Global Timing", 5, 0, 5
93 )
94 h_global_timing.SetStats(0)
95 h_global_timing.GetXaxis().SetTitle("method")
96 h_global_timing.GetYaxis().SetTitle("time/call [ms]")
97 h_global_timing.GetListOfFunctions().Add(
98 ROOT.TNamed(
99 "Description",
100 f"""The (average) time of the different basf2 execution phases
101 for {job_desc}. The error bars show the rms of the time
102 distributions.""",
103 )
104 )
105 h_global_timing.GetListOfFunctions().Add(
106 ROOT.TNamed(
107 "Check",
108 """There should be no significant and persistent increases in
 109 the run time of the methods. Only cases where the increase
110 compared to the reference or previous versions persists for at
111 least two consecutive revisions should be reported since the
112 measurements can be influenced by load from other processes on
113 the execution host.""",
114 )
115 )
116 if contact:
117 h_global_timing.GetListOfFunctions().Add(
118 ROOT.TNamed("Contact", contact)
119 )
120 for (index, method) in statistics.StatisticCounters.values.items():
121 method_name[method] = str(method)[0] + str(method).lower()[1:].replace(
122 "_r", "R"
123 )
124 if index == 5:
125 break
126 h_global_timing.SetBinContent(
127 index + 1, statistics.get_global().time_mean(method) * 1e-6
128 )
129 h_global_timing.SetBinError(
130 index + 1, statistics.get_global().time_stddev(method) * 1e-6
131 )
132 h_global_timing.GetXaxis().SetBinLabel(index + 1, method_name[method])
133 h_global_timing.Write()
134
135 # Timing per module for the different methods
136 modules = statistics.modules
137 h_module_timing = ROOT.TH1D(
138 prefix + "ModuleTiming", "Module Timing", len(modules), 0, len(modules)
139 )
140 h_module_timing.SetStats(0)
141 h_module_timing.GetXaxis().SetTitle("module")
142 h_module_timing.GetYaxis().SetTitle("time/call [ms]")
143 h_module_timing.GetListOfFunctions().Add(
144 ROOT.TNamed(
145 "Check",
146 """There should be no significant and persistent increases in
147 the run time of a module. Only cases where the increase compared
148 to the reference or previous versions persists for at least two
149 consecutive revisions should be reported since the measurements
150 can be influenced by load from other processes on the execution
151 host.""",
152 )
153 )
154 if contact:
155 h_module_timing.GetListOfFunctions().Add(
156 ROOT.TNamed("Contact", contact)
157 )
158 for method in timing_methods:
159 h_module_timing.SetTitle(f"Module {method_name[method]} Timing")
160 h_module_timing.GetListOfFunctions().Add(
161 ROOT.TNamed(
162 "Description",
163 f"""The (average) execution time of the {method_name[method]} method of modules
164 for {job_desc}. The error bars show the rms of the time
165 distributions.""",
166 )
167 )
168 index = 1
169 for modstat in modules:
170 h_module_timing.SetBinContent(
171 index, modstat.time_mean(method) * 1e-6
172 )
173 h_module_timing.SetBinError(
174 index, modstat.time_stddev(method) * 1e-6
175 )
176 h_module_timing.GetXaxis().SetBinLabel(index, modstat.name)
177 index += 1
178 h_module_timing.Write(f"{prefix}{method_name[method]}Timing")
179 h_module_timing.GetListOfFunctions().RemoveLast()
180
181 # Memory usage profile
182 memory_profile = ROOT.Belle2.PyStoreObj("VirtualMemoryProfile", 1)
183 if memory_profile:
184 memory_profile.obj().GetListOfFunctions().Add(
185 ROOT.TNamed(
186 "Description",
187 f"The virtual memory usage vs. the event number for {job_desc}.",
188 )
189 )
190 memory_profile.obj().GetListOfFunctions().Add(
191 ROOT.TNamed(
192 "Check",
193 """The virtual memory usage should be flat for high event
194 numbers. If it keeps rising this is an indication of a memory
195 leak.<br>There should also be no significant increases with
196 respect to the reference (or previous revisions if no reference
197 exists).""",
198 )
199 )
200 if contact:
201 memory_profile.obj().GetListOfFunctions().Add(
202 ROOT.TNamed("Contact", contact)
203 )
204 memory_profile.obj().Write(prefix + "VirtualMemoryProfile")
205
206 # Rss Memory usage profile
207 memory_profile = ROOT.Belle2.PyStoreObj("RssMemoryProfile", 1)
208 if memory_profile:
209 memory_profile.obj().GetListOfFunctions().Add(
210 ROOT.TNamed(
211 "Description",
212 f"The rss memory usage vs. the event number for {job_desc}.",
213 )
214 )
215 memory_profile.obj().GetListOfFunctions().Add(
216 ROOT.TNamed(
217 "Check",
218 """The rss memory usage should be flat for high event numbers.
219 If it keeps rising this is an indication of a memory
220 leak.<br>There should also be no significant increases with
221 respect to the reference (or previous revisions if no reference
222 exists). In the (rare) case that memory is swapped by the OS,
223 the rss memory usage can decrease.""",
224 )
225 )
226 if contact:
227 memory_profile.obj().GetListOfFunctions().Add(
228 ROOT.TNamed("Contact", contact)
229 )
230 memory_profile.obj().Write(prefix + "RssMemoryProfile")
231
232 # Memory usage per module for the different methods
233 sqrt_n = 1 / math.sqrt(statistics.get_global().calls() - 1)
234 h_module_memory = ROOT.TH1D(
235 prefix + "ModuleMemory",
236 "Virtual Module Memory",
237 len(modules),
238 0,
239 len(modules),
240 )
241 h_module_memory.SetStats(0)
242 h_module_memory.GetXaxis().SetTitle("module")
243 h_module_memory.GetYaxis().SetTitle("memory increase/call [kB]")
244 h_module_memory.GetListOfFunctions().Add(
245 ROOT.TNamed(
246 "Description",
247 f"The (average) increase in virtual memory usage per call of the "
248 f"{method_name[method]} method of modules for {job_desc}.",
249 )
250 )
251 h_module_memory.GetListOfFunctions().Add(
252 ROOT.TNamed(
253 "Check",
254 "The increase in virtual memory usage per call for each module "
255 "should be consistent with zero or the reference.",
256 )
257 )
258 if contact:
259 h_module_memory.GetListOfFunctions().Add(
260 ROOT.TNamed("Contact", contact)
261 )
 262 for method in memory_methods:
263 h_module_memory.SetTitle(f"Module {method_name[method]} Memory")
264 index = 1
265 for modstat in modules:
266 h_module_memory.SetBinContent(index, modstat.memory_mean(method))
267 h_module_memory.SetBinError(
268 index, modstat.memory_stddev(method) * sqrt_n
269 )
270 h_module_memory.GetXaxis().SetBinLabel(index, modstat.name)
271 index += 1
272 h_module_memory.Write(f"{prefix}{method_name[method]}Memory")
273 h_module_memory.GetListOfFunctions().RemoveLast()
274
275 if plot_file:
276 plot_file.Close()
277 save_dir.cd()
278
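# Example (editor's sketch, not part of validation.py): a validation steering
# file would typically call statistics_plots() after basf2.process(), e.g.
#
#     import basf2
#     from validation import statistics_plots
#     basf2.process(path)
#     statistics_plots(
#         "ExampleStatistics.root",        # hypothetical output file name
#         contact="someone@belle2.org",    # hypothetical contact address
#         job_desc="an example simulation job",
#         prefix="Example",
#     )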
279
280def event_timing_plot(
281 data_file,
282 file_name="",
283 max_time=20.0,
284 burn_in=1,
285 contact="",
286 job_desc="",
287 prefix="",
288):
289 """
290 Add a validation histogram of event execution time to the given root file.
 291 The current ROOT file is used if file_name is empty (the default).
292 The data file has to contain the profile information created by the Profile
293 module.
294 """
295
296 if not job_desc:
297 job_desc = os.path.basename(sys.argv[0])
298
299 # Get histogram with time vs event number
300 save_dir = ROOT.gDirectory
301 data = ROOT.TFile.Open(data_file)
302 tree = data.Get("tree")
303 entries = tree.GetEntries()
304 tree.Draw(
305 f"Entry$>>hEventTime({int(entries)},-0.5,{int(entries - 1)}.5)",
306 "ProfileInfo.m_timeInSec",
307 "goff",
308 )
309 # load the histogram created by the above Draw command
310 h_event_time = data.Get("hEventTime")
311 h_event_time.SetDirectory(0)
312 data.Close()
313 save_dir.cd()
314
315 # Open plot file
316 plot_file = None
317 if file_name:
318 plot_file = ROOT.TFile.Open(file_name, "UPDATE")
319
320 # Create and fill histogram with event execution time distribution
321 stat = ROOT.gStyle.GetOptStat()
322 ROOT.gStyle.SetOptStat(101110)
323 h_timing = ROOT.TH1D(prefix + "Timing", "Event Timing", 100, 0, max_time)
324 h_timing.UseCurrentStyle()
325 h_timing.GetXaxis().SetTitle("time [s]")
326 h_timing.GetYaxis().SetTitle("events")
327 h_timing.GetListOfFunctions().Add(
328 ROOT.TNamed(
329 "Description",
330 f"The distribution of event execution times for {job_desc}.",
331 )
332 )
333 h_timing.GetListOfFunctions().Add(
334 ROOT.TNamed(
335 "Check",
336 "The distribution should be consistent with the reference (or "
337 "previous revisions if no reference exists).",
338 )
339 )
340 if contact:
341 h_timing.GetListOfFunctions().Add(ROOT.TNamed("Contact", contact))
342 for event in range(1 + burn_in, entries + 1):
343 h_timing.Fill(
344 h_event_time.GetBinContent(event)
345 - h_event_time.GetBinContent(event - 1)
346 )
347 h_timing.Write()
348 ROOT.gStyle.SetOptStat(stat)
349
350 if plot_file:
351 plot_file.Close()
352 save_dir.cd()
353
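# Example (editor's sketch, assumptions marked): the data file passed to
# event_timing_plot() has to contain the ProfileInfo objects written by the
# basf2 Profile module, e.g.
#
#     path.add_module("Profile")            # fills ProfileInfo for each event
#     path.add_module("RootOutput", outputFileName="profile_data.root")
#     basf2.process(path)
#     event_timing_plot("profile_data.root", "ExampleStatistics.root", burn_in=1)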
354
355def draw_progress_bar(delete_lines: int, scripts: List[Script], barlength=50):
356 """
 357 This function draws a progress bar for the validation, i.e. it shows what
 358 percentage of the scripts has already been executed.
 359 It also shows which scripts are currently running, as well as
360 the total runtime of the validation.
361
 362 @param delete_lines: The number of lines which need to be deleted before
363 we can redraw the progress bar
364 @param scripts: List of all Script objects
365 @param barlength: The length of the progress bar (in characters)
366 @return: The number of lines that were printed by this function call.
367 Useful if this function is called repeatedly.
368 """
369
370 # Get statistics: Number of finished scripts + number of scripts in total
371 finished_scripts = len(
372 [
373 _
374 for _ in scripts
375 if _.status
 376 in [
 377 ScriptStatus.finished,
 378 ScriptStatus.failed,
 379 ScriptStatus.skipped,
 380 ]
381 ]
382 )
383 all_scripts = len(scripts)
384 percent = 100.0 * finished_scripts / all_scripts
385
386 # Get the runtime of the script
387 runtime = int(timeit.default_timer() - get_start_time())
388
389 # Move the cursor up and clear lines
390 for i in range(delete_lines):
391 print("\x1b[2K \x1b[1A", end=" ")
392
393 # Print the progress bar:
394 progressbar = ""
395 for i in range(barlength):
396 if i < int(barlength * percent / 100.0):
397 progressbar += "="
398 else:
399 progressbar += " "
400 print(
401 f"\x1b[0G[{progressbar}] {percent:6.1f}% "
402 f"({finished_scripts}/{all_scripts})"
403 )
404
405 # Print the total runtime:
406 print(f"Runtime: {runtime}s")
407
408 # Print the list of currently running scripts:
409 running = [
410 os.path.basename(__.path)
 411 for __ in scripts
 412 if __.status == ScriptStatus.running
 413 ]
414
 415 # If nothing is currently running, show a placeholder
416 if not running:
417 running = ["-"]
418
419 print(f"Running: {running[0]}")
420 for __ in running[1:]:
421 print(f"{len('Running:') * ' '} {__}")
422
423 return len(running) + 2
424
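# Example (editor's sketch): the return value is meant to be fed back in as
# delete_lines on the next call so that the bar is redrawn in place, mirroring
# the usage in Validation.run_validation() below:
#
#     progress_bar_lines = 0
#     while remaining_scripts:
#         ...
#         time.sleep(1)
#         progress_bar_lines = draw_progress_bar(progress_bar_lines, scripts)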
425
427 """
428 This can be used to parse the execution intervals of validation scripts
429 and can check whether a script object is in the list of intervals
430 configured in this class.
431 """
432
433 def __init__(self, intervals):
434 """
435 Initializes the IntervalSelector class with a list of intervals which
436 should be selected
437 """
438
 439 #: stores the intervals which have been selected
440 self.intervals = [x.strip() for x in intervals]
441
442 def in_interval(self, script_object: Script) -> bool:
443 """
 444 Checks whether the interval listed in a script object's header is
 445 among the selected intervals.
446 """
447
448 return script_object.interval in self.intervals
449
450
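# Example (editor's sketch): keeping only scripts whose header declares the
# "nightly" interval ("nightly" is just an illustrative interval name):
#
#     selector = IntervalSelector(["nightly"])
#     selected_scripts = [s for s in all_scripts if selector.in_interval(s)]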
451
454
455
456# todo: [Ref, low prio, low work] Denote private methods with underscore
457# /klieret
458class Validation:
459
460 """!
 461 This is the class that provides all global variables, like 'list_of_files' etc.
 462 There is only one instance of this class with name 'validation'. This
 463 allows us to use some kind of namespace, i.e. global variables will always be
 464 referenced as validation.[name of variable]. This makes it easier to
 465 distinguish them from local variables that only exist within the scope of a function or a method.
466
467 @var tag: The name of the folder within the results directory
468 @var log: Reference to the logging object for this validation instance
469 @var basepaths: The paths to the local and central release directory
470 @var scripts: List of all Script objects for steering files
471 @var packages: List of all packages which contributed scripts
472 @var basf2_options: The options to be given to the basf2 command
473 @var mode: Whether to run locally or on a cluster
474 @var quiet: No progress bar in quiet mode
475 @var dry: Dry runs do not actually start any scripts (for debugging)
476 """
477
478 def __init__(self, tag="current"):
479 """!
480 The default constructor. Initializes all those variables that will be
481 globally accessible later on. Does not return anything.
482 """
483
484 # The name which will be used to create a folder in the results
485 # directory. Default is 'current'.
486 self.tag = tag
487
488 # This dictionary holds the paths to the local and central release dir
489 # (or 'None' if one of them does not exist)
490 self.basepaths = validationpath.get_basepath()
491
492 # Folder used for the intermediate and final results of the validation
493 self.work_folder = os.path.abspath(os.getcwd())
494
495 # The logging-object for the validation (Instance of the logging-
 496 # module). The actual logger is created by calling the method
 497 # 'create_log()'.
498 self.log = self.create_log()
499
500 # The list which holds all steering file objects
501 # (as instances of class Script)
502 self.scripts: List[Script] = []
503
504 # A list of all packages from which we have collected steering files
505 self.packages: List[str] = []
506
 507 # The list of packages which will be ignored by default. This is
508 # only the validation package itself, because it only creates
509 # test-plots for validation development. To see only the
510 # validation-package output, use the --test command line flag
511 self.ignored_packages = ["validation-test"]
512
513 # Additional arguments for basf2, if we received any from the command
514 # line arguments
515 self.basf2_options = ""
516
517 # A variable which holds the mode, i.e. 'local' for local
518 # multi-processing and 'cluster' for cluster usage
519 self.mode = None
520
521 # Defines whether the validation is run in quiet mode, i.e. without
522 # the dynamic progress bar
523 self.quiet = False
524
525 # Defines if a dry run is performed, i.e. a run where the steering
526 # files are not actually started (for debugging purposes)
527 self.dry = False
528
529 # If this is set, dependencies will be ignored.
530 self.ignore_dependencies = False
531
 532 #: Reporting interval (in minutes): the time after which the first
 533 #: log output appears if a script has still not completed.
 534 #: Afterwards, the names of the scripts that are still running are
 535 #: printed at this interval.
 536 self.running_script_reporting_interval = 30
 537
 538
 539 #: The maximum time (in minutes) a script is allowed to run before it
 540 #: is terminated and marked as failed; a value <= 0 disables the limit
 541 self.script_max_runtime_in_minutes = 60 * 5
542
543
544 self.parallel = None
545
546 def get_useable_basepath(self):
547 """
 548 Checks if a local basepath is available and returns it. If only a
 549 central release is available, returns the path to this central release.
550 """
551 if self.basepaths["local"]:
552 return self.basepaths["local"]
553 else:
554 return self.basepaths["central"]
555
556 @staticmethod
557 def get_available_job_control():
558 """
559 insert the possible backend controls, they will be checked via their
560 is_supported method if they actually can be executed in the current
561 environment
562 """
563 return [
564 localcontrol.Local,
568 ]
569
570 @staticmethod
571 def get_available_job_control_names():
572 return [c.name() for c in Validation.get_available_job_control()]
573
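# Example (editor's sketch): run_validation() below selects the backend whose
# name matches the requested mode, roughly like
#
#     mode = "local"  # or one of the cluster backends
#     selected = [
#         c for c in Validation.get_available_job_control() if c.name() == mode
#     ]
#     control = selected[0]()  # 'local' additionally takes max_number_of_processes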
574 def build_dependencies(self):
575 """!
576 This method loops over all Script objects in self.scripts and
577 calls their compute_dependencies()-method.
578 @return: None
579 """
580 for script_object in self.scripts:
581 script_object.compute_dependencies(self.scripts)
582
583 # Make sure dependent scripts of skipped scripts are skipped, too.
584 for script_object in self.scripts:
585 if script_object.status == ScriptStatus.skipped:
586 self.skip_script(
587 script_object, reason=f"Depends on '{script_object.path}'"
588 )
589
590 def build_headers(self):
591 """!
592 This method loops over all Script objects in self.scripts and
593 calls their load_header()-method.
594 @return: None
595 """
596 for script_object in self.scripts:
597 script_object.load_header()
598
599 def skip_script(self, script_object, reason=""):
600 """!
601 This method sets the status of the given script and all dependent ones
602 to 'skipped'.
603 @param script_object: Script object to be skipped.
604 @param reason: Reason for skipping object
605 @return: None
606 """
607 # Print a warning if the status of the script is changed and then
608 # set it to 'skipped'.
 609 if script_object.status not in [
 610 ScriptStatus.skipped,
 611 ScriptStatus.failed,
 612 ]:
613 self.log.warning("Skipping " + script_object.path)
614 if reason:
615 self.log.debug(f"Reason for skipping: {reason}.")
616 script_object.status = ScriptStatus.skipped
617
618 # Also skip all dependent scripts.
619 for dependent_script in self.scripts:
620 if script_object in dependent_script.dependencies:
621 self.skip_script(
622 dependent_script,
623 reason=f"Depends on '{script_object.path}'",
624 )
625
626 def create_log(self) -> logging.Logger:
627 """!
628 Create the logger.
629 We use the logging module to create an object which allows us to
630 comfortably log everything that happens during the execution of
631 this script and even have different levels of importance, such as
632 'ERROR' or 'DEBUG'.
 633 @return: The logger object
634 """
635 # Create the log and set its default level to DEBUG, which means that
636 # it will store _everything_.
637 log = logging.getLogger("validate_basf2")
638 log.setLevel(logging.DEBUG)
639
640 # Now we add another custom level 'NOTE'. This is because we don't
641 # want to print ERRORs and WARNINGs to the console output, therefore
642 # we need a higher level.
643 # We define the new level and tell 'log' what to do when we use it
644 logging.NOTE = 100
645 logging.addLevelName(logging.NOTE, "NOTE")
646 log.note = lambda msg, *args: log._log(logging.NOTE, msg, args)
647
648 # Set up the console handler. The console handler will redirect a
649 # certain subset of all log message (i.e. those with level 'NOTE') to
650 # the command line (stdout), so we know what's going on when we
651 # execute the validation.
652
653 # Define the handler and its level (=NOTE)
654 console_handler = logging.StreamHandler()
655 console_handler.setLevel(logging.NOTE)
656
657 # Format the handler. We only need the message, no date/time etc.
658 console_format = logging.Formatter("%(message)s")
659 console_handler.setFormatter(console_format)
660
661 # Add the console handler to log
662 log.addHandler(console_handler)
663
664 # Now set up the file handler. The file handler will redirect
665 # _everything_ we log to a logfile so that we have all possible
666 # information available for debugging later.
667
668 # Make sure the folder for the log file exists
669 log_dir = self.get_log_folder()
670 if not os.path.exists(log_dir):
671 print("Creating " + log_dir)
672 os.makedirs(log_dir)
673
674 # Define the handler and its level (=DEBUG to get everything)
675 file_handler = logging.FileHandler(
676 os.path.join(log_dir, "validate_basf2.log"), "w+"
677 )
678 file_handler.setLevel(logging.DEBUG)
679
680 # Format the handler. We want the datetime, the module that produced
681 # the message, the LEVEL of the message and the message itself
682 file_format = logging.Formatter(
683 "%(asctime)s - %(module)s - " "%(levelname)s - %(message)s",
684 datefmt="%Y-%m-%d %H:%M:%S",
685 )
686 file_handler.setFormatter(file_format)
687
688 # Add the file handler to log
689 log.addHandler(file_handler)
690 return log
691
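# Example (editor's sketch): the custom NOTE level added in create_log() keeps
# the console output terse while the log file still receives everything:
#
#     validation = Validation()
#     validation.log.note("shown on stdout and written to validate_basf2.log")
#     validation.log.debug("written to validate_basf2.log only")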
692 def collect_steering_files(self, interval_selector):
693 """!
694 This function will collect all steering files from the local and
695 central release directory.
696 @return: None
697 """
698
699 # Get all folders that contain steering files, first the local ones
700 validation_folders = get_validation_folders(
701 "local", self.basepaths, self.log
702 )
703
704 # Then add those central folders that do not have a local match
705 for (package, folder) in get_validation_folders(
706 "central", self.basepaths, self.log
707 ).items():
708 if package not in validation_folders:
709 validation_folders[package] = folder
710
711 # remove packages which have been explicitly ignored
712 for ignored in self.ignored_packages:
713 if ignored in validation_folders:
714 del validation_folders[ignored]
715
716 # Now write to self.packages which packages we have collected
717 self.packages = list(validation_folders.keys())
718
719 # Finally, we collect the steering files from each folder we have
720 # collected:
721 for (package, folder) in validation_folders.items():
722
723 # Collect only *.C and *.py files
724 c_files = scripts_in_dir(folder, self.log, ".C")
725 py_files = scripts_in_dir(folder, self.log, ".py")
726 for steering_file in c_files + py_files:
727 script = Script(steering_file, package, self.log)
728 script.load_header()
729 # only select this script, if this interval has been selected
730 if (
731 interval_selector.in_interval(script)
732 and not script.noexecute
733 ):
734 self.scripts.append(script)
735
736 # That's it, now there is a complete list of all steering files on
737 # which we are going to perform the validation in self.scripts
738
739 def get_log_folder(self):
740 """!
 741 Get the log folder for this validation run. The lists of successful
 742 and failed scripts will be recorded there.
743 """
744 return validationpath.get_results_tag_folder(self.work_folder, self.tag)
745
746 def log_failed(self):
747 """!
748 This method logs all scripts with property failed into a single file
749 to be read in run_validation_server.py
750 """
751
752 failed_log_path = os.path.join(
753 self.get_log_folder(), "list_of_failed_scripts.log"
754 )
755 self.log.note(f"Writing list of failed scripts to {failed_log_path}.")
756
757 # Select only failed scripts
758 failed_scripts = [
759 script
760 for script in self.scripts
761 if script.status == ScriptStatus.failed
762 ]
763
764 with open(failed_log_path, "w+") as list_failed:
765 # log the name of all failed scripts
766 for script in failed_scripts:
767 list_failed.write(script.path.split("/")[-1] + "\n")
768
769 def log_skipped(self):
770 """!
771 This method logs all scripts with property skipped into a single file
772 to be read in run_validation_server.py
773 """
774
775 skipped_log_path = os.path.join(
776 self.get_log_folder(), "list_of_skipped_scripts.log"
777 )
778 self.log.note(f"Writing list of skipped scripts to {skipped_log_path}.")
779
 780 # Select only skipped scripts
781 skipped_scripts = [
782 script
783 for script in self.scripts
784 if script.status == ScriptStatus.skipped
785 ]
786
787 with open(skipped_log_path, "w+") as list_skipped:
 788 # log the name of all skipped scripts
789 for script in skipped_scripts:
790 list_skipped.write(script.path.split("/")[-1] + "\n")
791
792 def report_on_scripts(self):
793 """!
794 Print a summary about all scripts, especially highlighting
795 skipped and failed scripts.
796 """
797
798 failed_scripts = [
799 script.package + "/" + script.name
800 for script in self.scripts
801 if script.status == ScriptStatus.failed
802 ]
803 skipped_scripts = [
804 script.package + "/" + script.name
805 for script in self.scripts
806 if script.status == ScriptStatus.skipped
807 ]
808
809 self.log.note("")
810 self.log.note(
811 terminal_title_line("Summary of script execution", level=0)
812 )
813 self.log.note(f"Total number of scripts: {len(self.scripts)}")
814 self.log.note("")
815 if skipped_scripts:
816 self.log.note(
817 f"{len(skipped_scripts)}/{len(self.scripts)} scripts were skipped"
818 )
819 for s in skipped_scripts:
820 self.log.note(f"* {s}")
821 self.log.note("")
822 else:
823 self.log.note("No scripts were skipped. Nice!")
824 self.log.note("")
825
826 if failed_scripts:
827 self.log.note(
828 f"{len(failed_scripts)}/{len(self.scripts)} scripts failed"
829 )
830 for s in failed_scripts:
831 self.log.note(f"* {s}")
832 self.log.note("")
833 else:
834 self.log.note("No scripts failed. Nice!")
835 self.log.note("")
836
 837 print(
 838 validationfunctions.congratulator(
 839 total=len(self.scripts),
 840 failure=len(failed_scripts) + len(skipped_scripts),
 841 )
 842 )
843
844 def set_runtime_data(self):
845 """!
 846 This method sets the runtime property of each script.
847 """
848
849 run_times = {}
850 path = validationpath.get_results_runtime_file(self.work_folder)
851 with open(path) as runtimes:
852
853 # Get our data
854 for line in runtimes:
855 run_times[line.split("=")[0].strip()] = line.split("=")[
856 1
857 ].strip()
858
859 # And try to set a property for each script
860 for script in self.scripts:
861 try:
862 script.runtime = float(run_times[script.name])
863 # If we don't have runtime data, then set it to an average of
864 # all runtimes
865 except KeyError:
866 suma = 0.0
867 for dict_key in run_times:
868 suma += float(run_times[dict_key])
869 script.runtime = suma / len(run_times)
870
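# Example (editor's sketch): the runtimes file read by set_runtime_data()
# contains one "name=seconds" line per script, as written by run_validation(),
# e.g. (names and numbers purely illustrative):
#
#     ExampleValidation.py=2471.3
#     AnotherValidation.py=310.9
#
# Scripts without an entry are assigned the average of all recorded runtimes.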
871 def get_script_by_name(self, name: str) -> Optional[Script]:
872 """!
 873 Return the script with the given name, or None if no unique script with that name exists.
874 """
875
876 l_arr = [s for s in self.scripts if s.name == name]
877 if len(l_arr) == 1:
878 return l_arr[0]
879 else:
880 return None
881
882 def apply_package_selection(
883 self, selected_packages, ignore_dependencies=False
884 ):
885 """!
 886 Only select scripts from a specific set of packages, but still
 887 honor their dependencies on scripts outside of these packages.
888 """
889
890 to_keep_dependencies = set()
891
892 # compile the dependencies of selected scripts
893 # todo: won't work for nested dependencies
894 if not ignore_dependencies:
895 for script_obj in self.scripts:
896 if script_obj.package in selected_packages:
897 for dep in script_obj.dependencies:
898 to_keep_dependencies.add(dep.unique_name())
899 # now, remove all scripts from the script list, which are either
900 # not in the selected packages or have a dependency to them
901 self.scripts = [
902 s
903 for s in self.scripts
904 if (s.package in selected_packages)
905 or (s.unique_name() in to_keep_dependencies)
906 ]
907
908 # Check if some of the selected_packages were not found.
909 packages = {s.package for s in self.scripts}
910 packages_not_found = list(set(selected_packages) - packages)
911 if packages_not_found:
912 msg = (
913 f"You asked to select the package(s) {', '.join(packages_not_found)}, but they were not found."
914 )
915 self.log.note(msg)
916 self.log.warning(msg)
917
918 def apply_script_selection(
919 self, script_selection, ignore_dependencies=False
920 ):
921 """!
 922 This method will take the validation file names (e.g.
 923 "FullTrackingValidation.py"), determine all the scripts they depend on
 924 and set the status of these scripts to "waiting". The status of all
 925 other scripts will be set to "skipped", which means they will not be
 926 executed in the validation run. If ignore_dependencies is True,
 927 dependencies will also be set to "skipped".
928 """
929
930 # change file extension
931 script_selection = [
932 Script.sanitize_file_name(s) for s in script_selection
933 ]
934
935 scripts_to_enable = set()
936
937 # find the dependencies of each selected script
938 for script in script_selection:
939 scripts_to_enable.add(script)
940 script_obj = self.get_script_by_name(script)
941
942 if script_obj is None:
943 self.log.error(
944 f"Script with name {script} cannot be found, skipping for "
945 f"selection"
946 )
947 continue
948
949 others = script_obj.get_recursive_dependencies(self.scripts)
950 if not ignore_dependencies:
951 scripts_to_enable = scripts_to_enable.union(others)
952
953 # enable all selections and dependencies
954 for script_obj in self.scripts:
955 if script_obj.name in scripts_to_enable:
956 self.log.warning(
957 f"Enabling script {script_obj.name} because it was "
958 f"selected or a selected script depends on it."
959 )
960 script_obj.status = ScriptStatus.waiting
961 else:
962 self.log.warning(
963 f"Disabling script {script_obj.name} because it was "
964 f"not selected."
965 )
966 script_obj.status = ScriptStatus.skipped
967
968 # Check if some of the selected_packages were not found.
969 script_names = {Script.sanitize_file_name(s.name) for s in self.scripts}
970 scripts_not_found = set(script_selection) - script_names
971 if scripts_not_found:
972 msg = (
973 f"You requested script(s) {', '.join(scripts_not_found)}, but they seem to not have been found."
974 )
975 self.log.note(msg)
976 self.log.warning(msg)
977
978 def apply_script_caching(self):
979 cacheable_scripts = [s for s in self.scripts if s.is_cacheable]
980
981 output_dir_datafiles = validationpath.get_results_tag_folder(
982 self.work_folder, self.tag
983 )
984
985 for s in cacheable_scripts:
 986 # check for all output files
987 outfiles = s.output_files
988 files_exist = True
989 for of in outfiles:
990 full_path = os.path.join(output_dir_datafiles, of)
991 files_exist = files_exist and os.path.isfile(full_path)
992
993 if files_exist:
994 s.status = ScriptStatus.cached
995
996 # Remove all cached scripts from the dependencies
997 # of dependent script objects, they will not be
998 # executed and no one needs to wait for them
999 for script in self.scripts:
 1000 for dep_script in list(script.dependencies):  # iterate over a copy, items are removed below
1001 # check if the script this one is depending on is
1002 # in cached execution
1003 if dep_script.status == ScriptStatus.cached:
1004 script.dependencies.remove(dep_script)
1005
1006 def store_run_results_json(self, git_hash):
1007
 1008 # the git hash which was used for executing these validation scripts
 1009 # is passed in and stored together with the results
1010 json_package = []
1011 for p in self.packages:
 1012 this_package_scripts = [s for s in self.scripts if s.package == p]
 1013 json_scripts = [s.to_json(self.tag) for s in this_package_scripts]
 1014
 1015 # count the failed scripts
 1016 fail_count = sum(
 1017 [s.status == ScriptStatus.failed for s in this_package_scripts]
1018 )
 1019 json_package.append(
 1020 json_objects.Package(
 1021 p, scriptfiles=json_scripts, fail_count=fail_count
 1022 )
 1023 )
1024
 1025 # todo: assign correct color here
 1026 rev = json_objects.Revision(
 1027 label=self.tag,
1028 creation_date=datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
1029 creation_timezone=validationfunctions.get_timezone(),
1030 packages=json_package,
1031 git_hash=git_hash,
 1032 )
 1033 json_objects.dump(
 1034 validationpath.get_results_tag_revision_file(
 1035 self.work_folder, self.tag
1036 ),
1037 rev,
1038 )
1039
1040 def add_script(self, script: Script):
1041 """!
1042 Explicitly add a script object. In normal operation, scripts are
1043 auto-discovered but this method is useful for testing
1044 """
1045
1046 self.scripts.append(script)
1047
1048 @staticmethod
1049 def sort_scripts(script_list: List[Script]):
1050 """
 1051 Sort the list of scripts that have to be processed by runtime, so
 1052 that slow scripts are executed first. If no runtime information is
 1053 available from the last execution, run the scripts in the validation
 1054 package first because they are long running and used as input for other scripts.
1055 """
1056 script_list.sort(
1057 key=lambda x: x.runtime or x.package == "validation", reverse=True
1058 )
1059
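# Example (editor's sketch) of the resulting order: for scripts a (runtime
# 500 s), b (runtime 20 s), c (no runtime, package "validation") and d (no
# runtime, any other package), the sort keys are 500, 20, True (== 1) and
# False (== 0), so sorting with reverse=True yields [a, b, c, d].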
1060 def set_tag(self, tag):
1061 """
1062 Update the validation tag to enable utils to fetch revision-wise files
1063 from the same validation instance.
1064 """
1065 self.tag = tag
1066
1067 # todo: if you have to indent by 9 tabs, you know that it's time to refactor /klieret
1068 def run_validation(self):
1069 """!
1070 This method runs the actual validation, i.e. it loops over all
1071 scripts, checks which of them are ready for execution, and runs them.
1072 @return: None
1073 """
1074
1075 # Use the local execution for all plotting scripts
1076 self.log.note("Initializing local job control for plotting.")
1077 local_control = localcontrol.Local(
1078 max_number_of_processes=self.parallel
1079 )
1080
1081 # Depending on the selected mode, load either the controls for the
1082 # cluster or for local multi-processing
1083
1084 self.log.note("Selecting job control for all other jobs.")
1085
1086 selected_controls = [
1087 c for c in self.get_available_job_control() if c.name() == self.mode
1088 ]
1089
 1090 if len(selected_controls) != 1:
1091 print(f"Selected mode {self.mode} does not exist")
1092 sys.exit(1)
1093
1094 selected_control = selected_controls[0]
1095
1096 self.log.note(
1097 f"Controller: {selected_control.name()} ({selected_control.description()})"
1098 )
1099
1100 if not selected_control.is_supported():
1101 print(f"Selected mode {self.mode} is not supported on your system")
1102 sys.exit(1)
1103
1104 # instantiate the selected job control backend
1105 if selected_control.name() == "local":
1106 control = selected_control(max_number_of_processes=self.parallel)
1107 else:
1108 control = selected_control()
1109
1110 # read the git hash which is used to produce this validation
1111 src_basepath = self.get_useable_basepath()
1112 git_hash = validationfunctions.get_compact_git_hash(src_basepath)
1113 self.log.debug(
1114 f"Git hash of repository located at {src_basepath} is {git_hash}"
1115 )
1116
1117 # todo: perhaps we want to have these files in the results folder, don't we? /klieret
1118 # If we do have runtime data, then read them
1119 if (
1120 os.path.exists("./runtimes.dat")
1121 and os.stat("./runtimes.dat").st_size
1122 ):
1123 self.set_runtime_data()
1124 if os.path.exists("./runtimes-old.dat"):
1125 # If there is an old data backup, delete it, we backup only
1126 # one run
1127 os.remove("./runtimes-old.dat")
1128 if self.mode == "local":
1129 # Backup the old data file
1130 shutil.copyfile("./runtimes.dat", "./runtimes-old.dat")
1131
1132 # Open runtimes log and start logging, but log only if we are
1133 # running in the local mode
1134 if self.mode == "local":
1135 runtimes = open("./runtimes.dat", "w+")
1136
1137 if not self.quiet:
1138 # This variable is needed for the progress bar function
1139 progress_bar_lines = 0
1140 print()
1141
1142 # The list of scripts that have to be processed
1143 remaining_scripts = [
1144 script
1145 for script in self.scripts
1146 if script.status == ScriptStatus.waiting
1147 ]
1148
1149 # Sort the list of scripts that have to be processed by runtime,
1150 # execute slow scripts first
1151 self.sort_scripts(remaining_scripts)
1152
1153 def handle_finished_script(script_obj: Script):
1154 # Write to log that the script finished
1155 self.log.debug("Finished: " + script_obj.path)
1156
1157 # If we are running locally, log a runtime
1158 script_obj.runtime = time.time() - script_obj.start_time
1159 if self.mode == "local":
1160 runtimes.write(
1161 script_obj.name + "=" + str(script_obj.runtime) + "\n"
1162 )
1163
1164 # Check for the return code and set variables accordingly
1165 script_obj.status = ScriptStatus.finished
1166 script_obj.returncode = result[1]
1167 if result[1] != 0:
1168 script_obj.status = ScriptStatus.failed
1169 self.log.warning(
1170 f"exit_status was {result[1]} for {script_obj.path}"
1171 )
1172 script_obj.remove_output_files()
1173
1174 # Skip all dependent scripts
1175 self.skip_script(
1176 script_obj,
1177 reason=f"Script '{script_object.path}' failed and we set it's status to skipped so that all dependencies " +
1178 "are also skipped.",
1179 )
1180
1181 else:
1182 # Remove this script from the dependencies of dependent
1183 # script objects
1184 for dependent_script in remaining_scripts:
1185 if script_obj in dependent_script.dependencies:
1186 dependent_script.dependencies.remove(script_obj)
1187
1188 # Some printout in quiet mode
1189 if self.quiet:
1190 waiting = [
1191 script
1192 for script in remaining_scripts
1193 if script.status == ScriptStatus.waiting
1194 ]
1195 running = [
1196 script
1197 for script in remaining_scripts
1198 if script.status == ScriptStatus.running
1199 ]
1200 print(
1201 f"Finished [{len(waiting)},{len(running)}]: {script_obj.path} -> {script_obj.status}"
1202 )
1203
1204 def handle_unfinished_script(script_obj: Script):
1205 if (
1206 time.time() - script_obj.last_report_time
1207 ) / 60.0 > self.running_script_reporting_interval:
1208 print(
1209 f"Script {script_obj.name_not_sanitized} running since {time.time() - script_obj.start_time} seconds"
1210 )
1211 # explicit flush so this will show up in log file right away
1212 sys.stdout.flush()
1213
1214 # not finished yet, log time
1215 script_obj.last_report_time = time.time()
1216
1217 # check for the maximum time a script is allow to run and
1218 # terminate if exceeded
1219 total_runtime_in_minutes = (
1220 time.time() - script_obj.start_time
1221 ) / 60.0
1222 if (
1223 total_runtime_in_minutes
1224 > self.script_max_runtime_in_minutes
1225 > 0
1226 ):
1227 script_obj.status = ScriptStatus.failed
1228 self.log.warning(
1229 f"Script {script_obj.path} did not finish after "
1230 f"{total_runtime_in_minutes} minutes, attempting to "
1231 f"terminate. "
1232 )
1233 # kill the running process
1234 script_obj.control.terminate(script_obj)
1235 # Skip all dependent scripts
1236 self.skip_script(
1237 script_obj,
1238 reason=f"Script '{script_object.path}' did not finish in "
1239 f"time, so we're setting it to 'failed' so that all "
1240 f"dependent scripts will be skipped.",
1241 )
1242
1243 def handle_waiting_script(script_obj: Script):
1244 # Determine the way of execution depending on whether
1245 # data files are created
1246 if script_obj.output_files:
1247 script_obj.control = control
1248 else:
1249 script_obj.control = local_control
1250
1251 # Do not spawn processes if there are already too many!
1252 if script_obj.control.available():
1253
1254 # Write to log which script is being started
1255 self.log.debug("Starting " + script_obj.path)
1256
1257 # Set script object variables accordingly
1258 if script_obj.status == ScriptStatus.failed:
1259 self.log.warning(f"Starting of {script_obj.path} failed")
1260 else:
1261 script_obj.status = ScriptStatus.running
1262
1263 # Actually start the script execution
1264 script_obj.control.execute(
1265 script_obj, self.basf2_options, self.dry, self.tag
1266 )
1267
1268 # Log the script execution start time
1269 script_obj.start_time = time.time()
1270 script_obj.last_report_time = time.time()
1271
1272 # Some printout in quiet mode
1273 if self.quiet:
1274 waiting = [
1275 _
1276 for _ in remaining_scripts
1277 if _.status == ScriptStatus.waiting
1278 ]
1279 running = [
1280 _
1281 for _ in remaining_scripts
1282 if _.status == ScriptStatus.running
1283 ]
1284 print(
1285 f"Started [{len(waiting)},{len(running)}]: {script_obj.path}"
1286 )
1287
1288 # While there are scripts that have not yet been executed...
1289 while remaining_scripts:
1290
1291 # Loop over all steering files / Script objects
1292 for script_object in remaining_scripts:
1293
1294 # If the script is currently running
1295 if script_object.status == ScriptStatus.running:
1296
1297 # Check if the script has finished:
1298 result = script_object.control.is_job_finished(
1299 script_object
1300 )
1301
1302 # If it has finished:
1303 if result[0]:
1304 handle_finished_script(script_object)
1305 else:
1306 handle_unfinished_script(script_object)
1307
1308 # Otherwise (the script is waiting) and if it is ready to be
1309 # executed
1310 elif not script_object.dependencies:
1311 handle_waiting_script(script_object)
1312
1313 # Update the list of scripts that have to be processed
1314 remaining_scripts = [
1315 script
1316 for script in remaining_scripts
1317 if script.status in [ScriptStatus.waiting, ScriptStatus.running]
1318 ]
1319
1320 # Sort them again, Justin Case
1321 self.sort_scripts(remaining_scripts)
1322
1323 # Wait for one second before starting again
1324 time.sleep(1)
1325
1326 # If we are not running in quiet mode, draw the progress bar
1327 if not self.quiet:
1328 progress_bar_lines = draw_progress_bar(
1329 progress_bar_lines, self.scripts
1330 )
1331
1332 # Log failed and skipped scripts
1333 self.log_failed()
1334 self.log_skipped()
1335
1336 # And close the runtime data file
1337 if self.mode == "local":
1338 runtimes.close()
1339 print()
1340
1341 self.store_run_results_json(git_hash)
1342 # todo: update list of available revisions with the current run
1343
1344 def create_plots(self):
1345 """!
1346 This method prepares the html directory for the plots if necessary
1347 and creates the plots that include the results from this validation.
1348 @return: None
1349 """
1350
1351 html_folder = validationpath.get_html_folder(self.work_folder)
1352 results_folder = validationpath.get_results_folder(self.work_folder)
1353
1354 os.makedirs(html_folder, exist_ok=True)
1355
1356 if not os.path.exists(results_folder):
1357 self.log.error(
1358 f"Folder {results_folder} not found in "
1359 f"the work directory {self.work_folder}, please run "
1360 f"b2validation first"
1361 )
1362
1363 validationplots.create_plots(force=True, work_folder=self.work_folder)
1364
1365 def save_metadata(self):
1366 """!
1367 This method fetches the metadata of all the data files produced
1368 during the validation run and saves them in individual text files of the
1369 same name (with .txt appended at the end) inside the results folder.
1370 @return: None
1371 """
1372
1373 result_folder = self.get_log_folder()
1374
1375 if not os.path.exists(result_folder):
1376 self.log.error(
1377 f"Folder {result_folder} not found in "
1378 f"the work directory {self.work_folder}, please run "
1379 f"b2validation first"
1380 )
1381
1382 # Loop through all the *.root files in the current result
1383 for file in os.listdir(result_folder):
1384 if file.endswith(".root"):
1386 os.path.join(result_folder, file))
1387 if metadata:
1388 metadata_file = os.path.join(result_folder, f'{file}.txt')
1389 # Remove old metadata file if it exists
1390 try:
1391 os.remove(metadata_file)
1392 except FileNotFoundError:
1393 pass
1394 with open(metadata_file, 'a') as out:
1395 out.write(f'{metadata}\n')
1396 else:
1397 self.log.debug(f"b2file-metadata-show failed for {file}.")
1398
1399
1400def execute(tag=None, is_test=None):
1401 """!
 1402 Parses the command line and executes the full validation suite.
 1403 :param tag: The name that will be used for the current revision.
 1404 Default None means automatic.
 1405 :param is_test: Run in test mode? Default None means that we read this
 1406 from the command line arguments (which default to False).
 1407 :return: None
1408 """
1409
1410 # Note: Do not test tag and is_test, but rather cmd_arguments.tag
1411 # and cmd_arguments.is_test!
1412 # Also note that we modify some cmd_arguments below
1413 # (e.g. cmd_arguments.packages is updated if cmd_arguments.test is
1414 # specified).
1415
1416 # If there is no release of basf2 set up, we can stop the execution
1417 # right here!
1418 if (
1419 os.environ.get("BELLE2_RELEASE_DIR", None) is None
1420 and os.environ.get("BELLE2_LOCAL_DIR", None) is None
1421 ):
1422 sys.exit("Error: No basf2 release set up!")
1423
1424 # Otherwise we can start the execution. The main part is wrapped in a
1425 # try/except-construct to fetch keyboard interrupts
1426 # fixme: except instructions make only sense after Validation obj is
1427 # initialized ==> Pull everything until there out of try statement
1428 try:
1429
1430 # Now we process the command line arguments.
1431 # First of all, we read them in:
1432 cmd_arguments = parse_cmd_line_arguments(
1433 modes=Validation.get_available_job_control_names()
1434 )
1435
 1436 # overwrite the default settings with the parameters given in the
 1437 # method call
1438 if tag is not None:
1439 cmd_arguments.tag = tag
1440 if is_test is not None:
1441 cmd_arguments.test = is_test
1442
1443 # Create the validation object.
1444 validation = Validation(cmd_arguments.tag)
1445
1446 # Write to log that we have started the validation process
1447 validation.log.note("Starting validation...")
1448 validation.log.note(
 1449 f'Results will be stored in a folder named "{validation.tag}"...'
1450 )
1451 validation.log.note(
1452 f"The (full) log file(s) can be found at {', '.join(get_log_file_paths(validation.log))}"
1453 )
1454 validation.log.note(
1455 "Please check these logs when encountering "
1456 "unexpected results, as most of the warnings and "
1457 "errors are not written to stdout/stderr."
1458 )
1459
1460 # Check if we received additional arguments for basf2
1461 if cmd_arguments.options:
1462 validation.basf2_options = " ".join(cmd_arguments.options)
1463 validation.log.note(
1464 f"Received arguments for basf2: {validation.basf2_options}"
1465 )
1466
1467 # Check if we are using the cluster or local multiprocessing:
1468 validation.mode = cmd_arguments.mode
1469
1470 # Set if we have a limit on the maximum number of local processes
1471 validation.parallel = cmd_arguments.parallel
1472
1473 # Check if we are running in quiet mode (no progress bar)
1474 if cmd_arguments.quiet:
1475 validation.log.note("Running in quiet mode (no progress bar).")
1476 validation.quiet = True
1477
1478 # Check if we are performing a dry run (don't actually start scripts)
1479 if cmd_arguments.dry:
1480 validation.log.note(
1481 "Performing a dry run; no scripts will be " "started."
1482 )
1483 validation.dry = True
1484
1485 # If running in test mode, only execute scripts in validation package
1486 if cmd_arguments.test:
1487 validation.log.note("Running in test mode")
1488 validation.ignored_packages = []
1489 cmd_arguments.packages = ["validation-test"]
1490
1491 validation.log.note(
1492 f"Release Folder: {validation.basepaths['central']}"
1493 )
1494 validation.log.note(
1495 f"Local Folder: {validation.basepaths['local']}"
1496 )
1497
1498 # Now collect the steering files which will be used in this validation.
1499 validation.log.note("Collecting steering files...")
1500 intervals = cmd_arguments.intervals.split(",")
1501 validation.collect_steering_files(IntervalSelector(intervals))
1502
1503 # Build headers for every script object we have created
1504 validation.log.note("Building headers for Script objects...")
1505 validation.build_headers()
1506
1507 # Build dependencies for every script object we have created,
1508 # unless we're asked to ignore them.
1509 if not cmd_arguments.select_ignore_dependencies:
1510 validation.log.note("Building dependencies for Script objects...")
1511 validation.build_dependencies()
1512
1513 if cmd_arguments.packages:
1514 validation.log.note(
1515 "Applying package selection for the following package(s): "
1516 + ", ".join(cmd_arguments.packages)
1517 )
1518 validation.apply_package_selection(cmd_arguments.packages)
1519
1520 # select only specific scripts, if this option has been set
1521 if cmd_arguments.select:
1522 validation.log.note("Applying selection for validation scripts")
1523 validation.apply_script_selection(
1524 cmd_arguments.select, ignore_dependencies=False
1525 )
1526
1527 # select only specific scripts and ignore their dependencies if
1528 # option is set
1529 if cmd_arguments.select_ignore_dependencies:
1530 validation.log.note(
1531 "Applying selection for validation scripts, "
1532 "ignoring their dependencies"
1533 )
1534 validation.apply_script_selection(
1535 cmd_arguments.select_ignore_dependencies,
1536 ignore_dependencies=True,
1537 )
1538
1539 # check if the scripts which are cacheable can be skipped, because
1540 # their output is already available
1541 if cmd_arguments.use_cache:
1542 validation.log.note("Checking for cached script output")
1543 validation.apply_script_caching()
1544
1545 # Allow to change the maximal run time of the scripts
1546 if cmd_arguments.max_run_time is not None:
1547 if cmd_arguments.max_run_time > 0:
1548 validation.log.note(
1549 f"Setting maximal run time of the steering files "
1550 f"to {cmd_arguments.max_run_time} minutes."
1551 )
1552 else:
1553 validation.log.note(
1554 "Disabling run time limitation of steering files as "
1555 "requested (max run time set to <= 0)."
1556 )
1557 validation.script_max_runtime_in_minutes = (
1558 cmd_arguments.max_run_time
1559 )
1560
1561 # Start the actual validation
1562 validation.log.note("Starting the validation...")
1563 validation.run_validation()
1564
1565 # Log that the validation has finished and that we are creating plots
1566 validation.log.note("Validation finished...")
1567 if not validation.dry:
1568 validation.log.note("Start creating plots...")
1569 validation.create_plots()
1570 validation.log.note("Plots have been created...")
1571 else:
1572 validation.log.note(
1573 "Skipping plot creation " "(dry run)..."
1574 )
1575 # send mails
1576 if cmd_arguments.send_mails:
1577 mails = mail_log.Mails(validation)
1578 validation.log.note("Start sending mails...")
1579 # send mails to all users with failed scripts/comparison
1580 if cmd_arguments.send_mails_mode == "incremental":
1581 incremental = True
1582 elif cmd_arguments.send_mails_mode == "full":
1583 incremental = False
1584 else:
1585 incremental = None
1586 mails.send_all_mails(incremental=incremental)
1587 validation.log.note(
1588 f"Save mail data to {validation.get_log_folder()}"
1589 )
1590 # save json with data about outgoing mails
1591 mails.write_log()
1592 validation.report_on_scripts()
1593
1594 validation.save_metadata()
1595
1596 # Log that everything is finished
1597 validation.log.note(
1598 f"Validation finished! Total runtime: {int(timeit.default_timer() - get_start_time())}s"
1599 )
1600
1601 if cmd_arguments.view:
1602 # run local webserver
1603 validationserver.run_server(open_site=True)
1604
1605 except KeyboardInterrupt:
1606 validation.log.note("Validation terminated by user!")
1607