# basf2-specific imports
from basf2 import statistics
from ROOT import PyConfig

PyConfig.IgnoreCommandLineOptions = True
import ROOT

# Standard library imports needed by the code below
import datetime
import logging
import math
import os
import pprint
import shutil
import sys
import time
import timeit
from typing import List, Optional

# Validation framework imports
from validationscript import Script, ScriptStatus
from validationfunctions import (
    get_start_time,
    get_validation_folders,
    scripts_in_dir,
    parse_cmd_line_arguments,
    get_log_file_paths,
    terminal_title_line,
)
import validationfunctions
import validationserver
import validationplots
import validationscript
import validationpath
import json_objects
import mail_log

# Job control backends (local and cluster)
import localcontrol
import clustercontrol
import clustercontrolsge
import clustercontroldrmaa

pp = pprint.PrettyPrinter(depth=6, indent=1, width=80)
def statistics_plots(
    file_name="",
    timing_methods=None,
    memory_methods=None,
    contact="",
    job_desc="",
    prefix="",
):
    """
    Add memory usage and execution time validation plots to the given root
    file. The current root file will be used if the file_name is empty
    (default).
    """

    if not timing_methods:
        timing_methods = [statistics.INIT, statistics.EVENT]
    if not memory_methods:
        memory_methods = [statistics.INIT, statistics.EVENT]
    # Open the plot file if a file name was given
    save_dir = ROOT.gDirectory
    plot_file = None
    if file_name:
        plot_file = ROOT.TFile.Open(file_name, "UPDATE")

    if not job_desc:
        job_desc = sys.argv[1]

    # Global timing
    method_name = {}
    h_global_timing = ROOT.TH1D(
        prefix + "GlobalTiming", "Global Timing", 5, 0, 5
    )
    h_global_timing.SetStats(0)
    h_global_timing.GetXaxis().SetTitle("method")
    h_global_timing.GetYaxis().SetTitle("time/call [ms]")
    h_global_timing.GetListOfFunctions().Add(
        ROOT.TNamed(
            "Description",
            """The (average) time of the different basf2 execution phases
            for {}. The error bars show the rms of the time
            distributions.""".format(
                job_desc
            ),
        )
    )
    h_global_timing.GetListOfFunctions().Add(
        ROOT.TNamed(
            "Check",
            """There should be no significant and persistent increases in
            the run time of the methods. Only cases where the increase
            compared to the reference or previous versions persists for at
            least two consecutive revisions should be reported since the
            measurements can be influenced by load from other processes on
            the execution host.""",
        )
    )
    h_global_timing.GetListOfFunctions().Add(ROOT.TNamed("Contact", contact))
    for (index, method) in statistics.StatisticCounters.values.items():
        # Build a CamelCase name, e.g. BEGIN_RUN -> BeginRun
        method_name[method] = str(method)[0] + str(method).lower()[1:].replace(
            "_r", "R"
        )
        if method == statistics.StatisticCounters.TOTAL:
            break
        h_global_timing.SetBinContent(
            index + 1, statistics.get_global().time_mean(method) * 1e-6
        )
        h_global_timing.SetBinError(
            index + 1, statistics.get_global().time_stddev(method) * 1e-6
        )
        h_global_timing.GetXaxis().SetBinLabel(index + 1, method_name[method])
    h_global_timing.Write()
    # Timing per module
    modules = statistics.modules
    h_module_timing = ROOT.TH1D(
        prefix + "ModuleTiming", "Module Timing", len(modules), 0, len(modules)
    )
    h_module_timing.SetStats(0)
    h_module_timing.GetXaxis().SetTitle("module")
    h_module_timing.GetYaxis().SetTitle("time/call [ms]")
    h_module_timing.GetListOfFunctions().Add(
        ROOT.TNamed(
            "Check",
            """There should be no significant and persistent increases in
            the run time of a module. Only cases where the increase compared
            to the reference or previous versions persists for at least two
            consecutive revisions should be reported since the measurements
            can be influenced by load from other processes on the execution
            host.""",
        )
    )
    h_module_timing.GetListOfFunctions().Add(ROOT.TNamed("Contact", contact))
    for method in timing_methods:
        h_module_timing.SetTitle("Module %s Timing" % method_name[method])
        h_module_timing.GetListOfFunctions().Add(
            ROOT.TNamed(
                "Description",
                """The (average) execution time of the %s method of modules
                for %s. The error bars show the rms of the time
                distributions."""
                % (method_name[method], job_desc),
            )
        )
        index = 1
        for modstat in modules:
            h_module_timing.SetBinContent(
                index, modstat.time_mean(method) * 1e-6
            )
            h_module_timing.SetBinError(
                index, modstat.time_stddev(method) * 1e-6
            )
            h_module_timing.GetXaxis().SetBinLabel(index, modstat.name)
            index += 1
        h_module_timing.Write("{}{}Timing".format(prefix, method_name[method]))
        # Remove the description so the next method can add its own
        h_module_timing.GetListOfFunctions().RemoveLast()
    # Memory usage profile
    memory_profile = ROOT.Belle2.PyStoreObj("VirtualMemoryProfile", 1)
    if memory_profile:
        memory_profile.obj().GetListOfFunctions().Add(
            ROOT.TNamed(
                "Description",
                f"The virtual memory usage vs. the event number for {job_desc}.",
            )
        )
        memory_profile.obj().GetListOfFunctions().Add(
            ROOT.TNamed(
                "Check",
                """The virtual memory usage should be flat for high event
                numbers. If it keeps rising this is an indication of a memory
                leak.<br>There should also be no significant increases with
                respect to the reference (or previous revisions if no reference
                exists).""",
            )
        )
        memory_profile.obj().GetListOfFunctions().Add(
            ROOT.TNamed("Contact", contact)
        )
        memory_profile.obj().Write(prefix + "VirtualMemoryProfile")

    # Rss memory usage profile
    memory_profile = ROOT.Belle2.PyStoreObj("RssMemoryProfile", 1)
    if memory_profile:
        memory_profile.obj().GetListOfFunctions().Add(
            ROOT.TNamed(
                "Description",
                f"The rss memory usage vs. the event number for {job_desc}.",
            )
        )
        memory_profile.obj().GetListOfFunctions().Add(
            ROOT.TNamed(
                "Check",
                """The rss memory usage should be flat for high event numbers.
                If it keeps rising this is an indication of a memory
                leak.<br>There should also be no significant increases with
                respect to the reference (or previous revisions if no reference
                exists). In the (rare) case that memory is swapped by the OS,
                the rss memory usage can decrease.""",
            )
        )
        memory_profile.obj().GetListOfFunctions().Add(
            ROOT.TNamed("Contact", contact)
        )
        memory_profile.obj().Write(prefix + "RssMemoryProfile")
    # Memory usage increase per call
    sqrt_n = 1 / math.sqrt(statistics.get_global().calls() - 1)
    h_module_memory = ROOT.TH1D(
        prefix + "ModuleMemory",
        "Virtual Module Memory",
        len(modules),
        0,
        len(modules),
    )
    h_module_memory.SetStats(0)
    h_module_memory.GetXaxis().SetTitle("module")
    h_module_memory.GetYaxis().SetTitle("memory increase/call [kB]")
    h_module_memory.GetListOfFunctions().Add(
        ROOT.TNamed(
            "Description",
            f"The (average) increase in virtual memory usage per call of the "
            f"{method_name[method]} method of modules for {job_desc}.",
        )
    )
    h_module_memory.GetListOfFunctions().Add(
        ROOT.TNamed(
            "Check",
            "The increase in virtual memory usage per call for each module "
            "should be consistent with zero or the reference.",
        )
    )
    h_module_memory.GetListOfFunctions().Add(ROOT.TNamed("Contact", contact))
    for method in memory_methods:
        h_module_memory.SetTitle("Module %s Memory" % method_name[method])
        index = 1
        for modstat in modules:
            h_module_memory.SetBinContent(index, modstat.memory_mean(method))
            h_module_memory.SetBinError(
                index, modstat.memory_stddev(method) * sqrt_n
            )
            h_module_memory.GetXaxis().SetBinLabel(index, modstat.name)
            index += 1
        h_module_memory.Write("{}{}Memory".format(prefix, method_name[method]))
        h_module_memory.GetListOfFunctions().RemoveLast()

    if plot_file:
        plot_file.Close()
    save_dir.cd()
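
# Illustrative usage sketch (not called anywhere in this module): a
# validation steering file could call statistics_plots() after event
# processing to store the timing and memory histograms. All file and job
# names below are hypothetical.
def _example_statistics_plots_usage():
    statistics_plots(
        file_name="EvtGenSim_statistics.root",
        contact="validation@example.org",
        job_desc="the EvtGen simulation job",
        prefix="EvtGenSim",
    )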
def event_timing_plot(
    data_file,
    file_name="",
    max_time=20.0,
    burn_in=1,
    contact="",
    job_desc="",
    prefix="",
):
    """
    Add a validation histogram of event execution time to the given root file.
    The current root file will be used if the file_name is empty (default).
    The data file has to contain the profile information created by the
    Profile module.
    """

    if not job_desc:
        job_desc = os.path.basename(sys.argv[0])

    # Get the histogram with the execution time vs the event number
    save_dir = ROOT.gDirectory
    data = ROOT.TFile.Open(data_file)
    tree = data.Get("tree")
    entries = tree.GetEntries()
    tree.Draw(
        "Entry$>>hEventTime(%d,-0.5,%d.5)" % (entries, entries - 1),
        "ProfileInfo.m_timeInSec",
        "goff",
    )
    h_event_time = data.Get("hEventTime")
    h_event_time.SetDirectory(0)
    data.Close()
    save_dir.cd()

    # Open the plot file if a file name was given
    plot_file = None
    if file_name:
        plot_file = ROOT.TFile.Open(file_name, "UPDATE")

    # Create the event timing histogram
    stat = ROOT.gStyle.GetOptStat()
    ROOT.gStyle.SetOptStat(101110)
    h_timing = ROOT.TH1D(prefix + "Timing", "Event Timing", 100, 0, max_time)
    h_timing.UseCurrentStyle()
    h_timing.GetXaxis().SetTitle("time [s]")
    h_timing.GetYaxis().SetTitle("events")
    h_timing.GetListOfFunctions().Add(
        ROOT.TNamed(
            "Description",
            f"The distribution of event execution times for {job_desc}.",
        )
    )
    h_timing.GetListOfFunctions().Add(
        ROOT.TNamed(
            "Check",
            "The distribution should be consistent with the reference (or "
            "previous revisions if no reference exists).",
        )
    )
    h_timing.GetListOfFunctions().Add(ROOT.TNamed("Contact", contact))
    # Fill with per-event time differences, skipping the burn-in events
    for event in range(1 + burn_in, entries + 1):
        h_timing.Fill(
            h_event_time.GetBinContent(event)
            - h_event_time.GetBinContent(event - 1)
        )
    h_timing.Write()
    ROOT.gStyle.SetOptStat(stat)

    if plot_file:
        plot_file.Close()
    save_dir.cd()
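
# Illustrative usage sketch (not called anywhere in this module):
# event_timing_plot() expects a data file produced with the Profile module,
# since it reads ProfileInfo.m_timeInSec from the "tree". All file and job
# names below are hypothetical.
def _example_event_timing_plot_usage():
    event_timing_plot(
        data_file="EvtGenSim_profile.root",
        file_name="EvtGenSim_statistics.root",
        burn_in=1,
        contact="validation@example.org",
        job_desc="the EvtGen simulation job",
        prefix="EvtGenSim",
    )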
def draw_progress_bar(delete_lines: int, scripts: List[Script], barlength=50):
    """
    This function plots a progress bar of the validation, i.e. it shows the
    percentage of scripts that have been executed so far.
    It furthermore also shows which scripts are currently running, as well as
    the total runtime of the validation.

    @param delete_lines: The number of lines which need to be deleted before
        we can redraw the progress bar
    @param scripts: List of all Script objects
    @param barlength: The length of the progress bar (in characters)
    @return: The number of lines that were printed by this function call.
        Useful if this function is called repeatedly.
    """

    # Count the scripts that have been executed already
    finished_scripts = len(
        [
            True
            for script in scripts
            if script.status
            in [
                ScriptStatus.finished,
                ScriptStatus.failed,
                ScriptStatus.skipped,
            ]
        ]
    )
    all_scripts = len(scripts)
    percent = 100.0 * finished_scripts / all_scripts

    # Get the total runtime so far
    runtime = int(timeit.default_timer() - get_start_time())

    # Move the cursor up and clear the lines drawn by the previous call
    for i in range(delete_lines):
        print("\x1b[2K \x1b[1A", end=" ")

    # Print the progress bar
    progressbar = ""
    for i in range(barlength):
        if i < int(barlength * percent / 100.0):
            progressbar += "="
        else:
            progressbar += " "
    print(
        f"\x1b[0G[{progressbar}] {percent:6.1f}% "
        f"({finished_scripts}/{all_scripts})"
    )

    # Print the total runtime
    print(f"Runtime: {runtime}s")

    # Print the list of currently running scripts
    running = [
        os.path.basename(__.path)
        for __ in scripts
        if __.status == ScriptStatus.running
    ]
    if not running:
        running = ["-"]

    print(f"Running: {running[0]}")
    for __ in running[1:]:
        print("{} {}".format(len("Running:") * " ", __))

    return len(running) + 2
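
# Illustrative sketch of the intended call pattern (a simplified version of
# the loop in Validation.run_validation() below): the return value of one
# call is fed back as delete_lines of the next call, so the bar is redrawn
# in place via the ANSI escape codes used above.
def _example_progress_bar_loop(scripts: List[Script]):
    lines = 0
    while any(script.status == ScriptStatus.running for script in scripts):
        lines = draw_progress_bar(lines, scripts)
        time.sleep(1)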
class IntervalSelector:
    """
    This can be used to parse the execution intervals of validation scripts
    and can check whether a script object is in the list of intervals
    configured in this class.
    """

    def __init__(self, intervals):
        """
        Initializes the IntervalSelector class with a list of intervals
        which should be selected.
        """

        #: stores the intervals which have been selected
        self.intervals = [x.strip() for x in intervals]

    def in_interval(self, script_object: Script) -> bool:
        """
        checks whether the interval listed in a script object's header is
        within the selected intervals.
        """

        return script_object.interval in self.intervals
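
# Illustrative usage sketch (not called anywhere in this module): select
# only the scripts whose header declares the "nightly" interval. The list
# of Script objects is assumed to have been collected already.
def _example_interval_selection(scripts: List[Script]) -> List[Script]:
    selector = IntervalSelector(["nightly"])
    return [script for script in scripts if selector.in_interval(script)]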
class Validation:
    """
    This is the class that provides all global variables, like 'list_of_files'
    etc. There is only one instance of this class with name 'validation'. This
    allows to use some kind of namespace, i.e. global variables will always be
    referenced as validation.[name of variable]. This makes it easier to
    distinguish them from local variables that only exist within the scope of
    a function.

    @var tag: The name of the folder within the results directory
    @var log: Reference to the logging object for this validation instance
    @var basepaths: The paths to the local and central release directory
    @var scripts: List of all Script objects for steering files
    @var packages: List of all packages which contributed scripts
    @var basf2_options: The options to be given to the basf2 command
    @var mode: Whether to run locally or on a cluster
    @var quiet: No progress bar in quiet mode
    @var dry: Dry runs do not actually start any scripts (for debugging)
    """

    def __init__(self, tag="current"):
        """
        The default constructor. Initializes all those variables that will be
        globally accessible later on. Does not return anything.
        """

        #: The name of the folder within the results directory
        self.tag = tag

        #: The work folder containing the results and plots
        self.work_folder = os.path.abspath(os.getcwd())

        #: The paths to the local and central release directory
        self.basepaths = validationfunctions.get_basepath()

        #: Reference to the logging object for this validation instance
        self.log = self.create_log()

        #: The list of Script objects for steering files
        self.scripts: List[Script] = []

        #: The list of packages from which scripts were collected
        self.packages: List[str] = []

        #: Packages that are ignored by default
        self.ignored_packages = ["validation-test"]

        #: Additional options to be given to the basf2 command
        self.basf2_options = ""

        #: Whether to run locally or on a cluster; set in execute()
        self.mode = None

        #: Number of parallel processes for local execution; set in execute()
        self.parallel = None

        #: No progress bar in quiet mode; set in execute()
        self.quiet = False

        #: Dry runs do not actually start any scripts; set in execute()
        self.dry = False

        #: Whether to ignore dependencies between scripts
        self.ignore_dependencies = False

        #: Report on still-running scripts every x minutes
        self.running_script_reporting_interval = 30

        #: Maximum run time of a single script in minutes
        self.script_max_runtime_in_minutes = 60 * 5
    def get_useable_basepath(self):
        """
        Checks if a local path is available. If only a central release is
        available, return the path to this central release.
        """
        if self.basepaths["local"]:
            return self.basepaths["local"]
        else:
            return self.basepaths["central"]

    @staticmethod
    def get_available_job_control():
        """
        Insert the possible backend controls; they will be checked via their
        is_supported method if they actually can be executed in the current
        environment.
        """
        return [
            localcontrol.Local,
            clustercontrol.Cluster,
            clustercontrolsge.Cluster,
            clustercontroldrmaa.Cluster,
        ]

    @staticmethod
    def get_available_job_control_names():
        """Return the names of all available job control backends."""
        return [c.name() for c in Validation.get_available_job_control()]
    def build_dependencies(self):
        """
        This method loops over all Script objects in self.scripts and
        calls their compute_dependencies()-method.
        """
        for script_object in self.scripts:
            script_object.compute_dependencies(self.scripts)

        # Make sure dependent scripts of skipped scripts are skipped, too
        for script_object in self.scripts:
            if script_object.status == ScriptStatus.skipped:
                self.skip_script(
                    script_object, reason=f"Depends on '{script_object.path}'"
                )

    def build_headers(self):
        """
        This method loops over all Script objects in self.scripts and
        calls their load_header()-method.
        """
        for script_object in self.scripts:
            script_object.load_header()
    def skip_script(self, script_object, reason=""):
        """
        This method sets the status of the given script and all dependent
        ones to 'skipped'.

        @param script_object: Script object to be skipped.
        @param reason: Reason for skipping object
        """
        # Print a warning if the status of the script is changed and set it
        # to 'skipped'
        if script_object.status not in [
            ScriptStatus.skipped,
            ScriptStatus.failed,
        ]:
            self.log.warning("Skipping " + script_object.path)
            self.log.debug(f"Reason for skipping: {reason}.")
            script_object.status = ScriptStatus.skipped

        # Also skip all dependent scripts
        for dependent_script in self.scripts:
            if script_object in dependent_script.dependencies:
                self.skip_script(
                    dependent_script,
                    reason=f"Depends on '{script_object.path}'",
                )
    def create_log(self) -> logging.Logger:
        """
        Create the logger.
        We use the logging module to create an object which allows us to
        comfortably log everything that happens during the execution of
        this script and even have different levels of importance, such as
        'debug' or 'warning'.
        """
        # Create the log and set its default level
        log = logging.getLogger("validate_basf2")
        log.setLevel(logging.DEBUG)

        # Register the custom, highly visible 'NOTE' log level
        logging.NOTE = 100
        logging.addLevelName(logging.NOTE, "NOTE")
        log.note = lambda msg, *args: log._log(logging.NOTE, msg, args)

        # Create the console handler, which only shows NOTE messages
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.NOTE)

        # Format the console handler
        console_format = logging.Formatter("%(message)s")
        console_handler.setFormatter(console_format)

        # Add the console handler to the log
        log.addHandler(console_handler)

        # Now set up the file handler:
        # Make sure the log folder exists
        log_dir = self.get_log_folder()
        if not os.path.exists(log_dir):
            print("Creating " + log_dir)
            os.makedirs(log_dir)

        # The file handler writes everything (level DEBUG) to a log file
        file_handler = logging.FileHandler(
            os.path.join(log_dir, "validate_basf2.log"), "w+"
        )
        file_handler.setLevel(logging.DEBUG)

        # Format the file handler
        file_format = logging.Formatter(
            "%(asctime)s - %(module)s - %(levelname)s - %(message)s",
            datefmt="%Y-%m-%d %H:%M:%S",
        )
        file_handler.setFormatter(file_format)

        # Add the file handler to the log
        log.addHandler(file_handler)
        return log
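
    @staticmethod
    def _example_note_logging():
        """
        Illustrative sketch (hypothetical, not called anywhere): the custom
        NOTE level set up in create_log() always reaches the console handler,
        while DEBUG messages go only to the log file.
        """
        validation = Validation()
        validation.log.note("Appears on the console and in the log file.")
        validation.log.debug("Appears only in validate_basf2.log.")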
    def collect_steering_files(self, interval_selector):
        """
        This function will collect all steering files from the local and
        central release directory.
        """
        # Get all folders that contain steering files, starting with the
        # local ones
        validation_folders = get_validation_folders(
            "local", self.basepaths, self.log
        )

        # Then add those central folders that do not have a local match
        for (package, folder) in get_validation_folders(
            "central", self.basepaths, self.log
        ).items():
            if package not in validation_folders:
                validation_folders[package] = folder

        # Remove packages that have been explicitly ignored
        for ignored in self.ignored_packages:
            if ignored in validation_folders:
                del validation_folders[ignored]

        # Now write to self.packages which packages we have collected
        self.packages = list(validation_folders.keys())

        # Finally, collect the steering files (*.C and *.py) from each folder
        for (package, folder) in validation_folders.items():
            c_files = scripts_in_dir(folder, self.log, ".C")
            py_files = scripts_in_dir(folder, self.log, ".py")
            for steering_file in c_files + py_files:
                script = Script(steering_file, package, self.log)
                script.load_header()
                # Only select scripts that run in one of the requested
                # intervals and that are not marked as noexecute
                if (
                    interval_selector.in_interval(script)
                    and not script.noexecute
                ):
                    self.scripts.append(script)
    def get_log_folder(self):
        """
        Get the log folder for this validation run. The command log files
        of (successful, failed) scripts will be recorded there.
        """
        return validationpath.get_results_tag_folder(self.work_folder, self.tag)

    def log_failed(self):
        """
        This method logs all scripts with property failed into a single file
        to be read in run_validation_server.py
        """
        failed_log_path = os.path.join(
            self.get_log_folder(), "list_of_failed_scripts.log"
        )
        self.log.note(f"Writing list of failed scripts to {failed_log_path}.")

        # Select only failed scripts
        failed_scripts = [
            script
            for script in self.scripts
            if script.status == ScriptStatus.failed
        ]

        # Log the name of all failed scripts
        with open(failed_log_path, "w+") as list_failed:
            for script in failed_scripts:
                list_failed.write(script.path.split("/")[-1] + "\n")

    def log_skipped(self):
        """
        This method logs all scripts with property skipped into a single file
        to be read in run_validation_server.py
        """
        skipped_log_path = os.path.join(
            self.get_log_folder(), "list_of_skipped_scripts.log"
        )
        self.log.note(f"Writing list of skipped scripts to {skipped_log_path}.")

        # Select only skipped scripts
        skipped_scripts = [
            script
            for script in self.scripts
            if script.status == ScriptStatus.skipped
        ]

        # Log the name of all skipped scripts
        with open(skipped_log_path, "w+") as list_skipped:
            for script in skipped_scripts:
                list_skipped.write(script.path.split("/")[-1] + "\n")
    def report_on_scripts(self):
        """
        Print a summary about all scripts, especially highlighting
        skipped and failed scripts.
        """
        failed_scripts = [
            script.package + "/" + script.name
            for script in self.scripts
            if script.status == ScriptStatus.failed
        ]
        skipped_scripts = [
            script.package + "/" + script.name
            for script in self.scripts
            if script.status == ScriptStatus.skipped
        ]

        self.log.note("")
        self.log.note(
            terminal_title_line("Summary of script execution", level=0)
        )
        self.log.note(f"Total number of scripts: {len(self.scripts)}")
        self.log.note("")
        if skipped_scripts:
            self.log.note(
                "{}/{} scripts were skipped".format(
                    len(skipped_scripts), len(self.scripts)
                )
            )
            for s in skipped_scripts:
                self.log.note(f"* {s}")
            self.log.note("")
        else:
            self.log.note("No scripts were skipped. Nice!")
            self.log.note("")

        if failed_scripts:
            self.log.note(
                "{}/{} scripts failed".format(
                    len(failed_scripts), len(self.scripts)
                )
            )
            for s in failed_scripts:
                self.log.note(f"* {s}")
            self.log.note("")
        else:
            self.log.note("No scripts failed. Nice!")
            self.log.note("")

        print(
            validationfunctions.congratulator(
                total=len(self.scripts),
                failure=len(failed_scripts) + len(skipped_scripts),
            )
        )
    def set_runtime_data(self):
        """
        This method sets the runtime property of each script from the
        runtimes.dat file of the previous run.
        """
        run_times = {}
        path = "./runtimes.dat"
        with open(path) as runtimes:
            # Each line has the form "<script name>=<runtime in seconds>"
            for line in runtimes:
                run_times[line.split("=")[0].strip()] = line.split("=")[
                    1
                ].strip()

        # Set the runtime property of each script
        for script in self.scripts:
            try:
                script.runtime = float(run_times[script.name])
            except KeyError:
                # If we have no runtime data for this script, use the
                # average of all available runtimes
                suma = 0.0
                for dict_key in run_times:
                    suma += float(run_times[dict_key])
                script.runtime = suma / len(run_times)
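
    @staticmethod
    def _example_write_runtimes(scripts: List[Script]):
        """
        Illustrative sketch (hypothetical helper, not part of the real
        workflow): the runtimes.dat format parsed by set_runtime_data() is
        one "<script name>=<runtime in seconds>" pair per line, as written
        by run_validation() in local mode.
        """
        with open("./runtimes.dat", "w+") as runtimes:
            for script in scripts:
                runtimes.write(f"{script.name}={script.runtime}\n")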
    def get_script_by_name(self, name: str) -> Optional[Script]:
        """
        Return the script with the given name, or None if there is no
        unique match.
        """
        l_arr = [s for s in self.scripts if s.name == name]
        if len(l_arr) == 1:
            return l_arr[0]
        else:
            return None
    def apply_package_selection(
        self, selected_packages, ignore_dependencies=False
    ):
        """
        Only select packages from a specific set of packages, but still
        honor the dependencies to outside scripts which may exist.
        """
        to_keep_dependencies = set()

        # Compile the dependencies of the selected scripts, unless we are
        # asked to ignore them
        if not ignore_dependencies:
            for script_obj in self.scripts:
                if script_obj.package in selected_packages:
                    for dep in script_obj.dependencies:
                        to_keep_dependencies.add(dep.unique_name())

        # Remove all scripts that belong to neither a selected package nor
        # the kept dependencies
        self.scripts = [
            s
            for s in self.scripts
            if (s.package in selected_packages)
            or (s.unique_name() in to_keep_dependencies)
        ]

        # Check if some of the selected packages were not found
        packages = {s.package for s in self.scripts}
        packages_not_found = list(set(selected_packages) - packages)
        if packages_not_found:
            msg = (
                "You asked to select the package(s) {}, but they were not "
                "found.".format(", ".join(packages_not_found))
            )
            self.log.warning(msg)
    def apply_script_selection(
        self, script_selection, ignore_dependencies=False
    ):
        """
        This method will take the validation file name (e.g.
        "FullTrackingValidation.py"), determine all the scripts it depends on
        and set the status of these scripts to "waiting". The status of all
        other scripts will be set to "skipped", which means they will not be
        executed in the validation run. If ignore_dependencies is True,
        dependencies will also be set to "skipped".
        """
        # Make sure the file names are sanitized
        script_selection = [
            Script.sanitize_file_name(s) for s in script_selection
        ]

        scripts_to_enable = set()

        # Find the dependencies of each selected script
        for script in script_selection:
            scripts_to_enable.add(script)
            script_obj = self.get_script_by_name(script)

            if script_obj is None:
                self.log.error(
                    f"Script with name {script} cannot be found, skipping for "
                    "selection"
                )
                continue

            others = script_obj.get_recursive_dependencies(self.scripts)
            if not ignore_dependencies:
                scripts_to_enable = scripts_to_enable.union(others)

        # Enable the selected scripts and their dependencies, skip the rest
        for script_obj in self.scripts:
            if script_obj.name in scripts_to_enable:
                self.log.warning(
                    f"Enabling script {script_obj.name} because it was "
                    f"selected or a selected script depends on it."
                )
                script_obj.status = ScriptStatus.waiting
            else:
                self.log.warning(
                    f"Disabling script {script_obj.name} because it was "
                    "not selected."
                )
                script_obj.status = ScriptStatus.skipped

        # Check if some of the selected scripts were not found
        script_names = {
            Script.sanitize_file_name(s.name) for s in self.scripts
        }
        scripts_not_found = set(script_selection) - script_names
        if scripts_not_found:
            msg = (
                "You requested script(s) {}, but they seem to not have "
                "been found.".format(", ".join(scripts_not_found))
            )
            self.log.warning(msg)
    def apply_script_caching(self):
        """
        Mark cacheable scripts whose output files already exist as cached,
        so that they will not be executed again.
        """
        cacheable_scripts = [s for s in self.scripts if s.is_cacheable]

        output_dir_datafiles = validationpath.get_results_tag_folder(
            self.work_folder, self.tag
        )

        for s in cacheable_scripts:
            # Check whether all output files of this script already exist
            outfiles = s.output_files
            if outfiles:
                files_exist = True
                for of in outfiles:
                    full_path = os.path.join(output_dir_datafiles, of)
                    files_exist = files_exist and os.path.isfile(full_path)
                if files_exist:
                    s.status = ScriptStatus.cached

        # Remove all cached scripts from the dependencies of dependent
        # script objects; they will not be executed and nobody needs to
        # wait for them
        for script in self.scripts:
            # Iterate over a copy, since we modify the list while looping
            for dep_script in list(script.dependencies):
                if dep_script.status == ScriptStatus.cached:
                    script.dependencies.remove(dep_script)
    def store_run_results_json(self, git_hash):
        """
        Save a json file with the results of this validation run into the
        results folder.
        """
        json_package = []
        for p in self.packages:
            this_package_scripts = [s for s in self.scripts if s.package == p]
            json_scripts = [s.to_json(self.tag) for s in this_package_scripts]

            # Count the failed scripts of this package
            fail_count = sum(
                s.status == ScriptStatus.failed for s in this_package_scripts
            )
            json_package.append(
                json_objects.Package(
                    p, scriptfiles=json_scripts, fail_count=fail_count
                )
            )

        # Write a json file with all information about this revision
        json_rev = json_objects.Revision(
            label=self.tag,
            git_hash=git_hash,
            creation_date=datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
            packages=json_package,
        )
        json_objects.dump(
            validationpath.get_results_tag_revision_file(
                self.work_folder, self.tag
            ),
            json_rev,
        )
    def add_script(self, script: Script):
        """
        Explicitly add a script object. In normal operation, scripts are
        auto-discovered, but this method is useful for testing.
        """
        self.scripts.append(script)

    @staticmethod
    def sort_scripts(script_list: List[Script]):
        """
        Sort the list of scripts that have to be processed by runtime,
        execute slow scripts first. If no runtime information is available
        from the last execution, run the scripts in the validation package
        first because they are long running and used as input for other
        scripts.
        """
        script_list.sort(
            key=lambda x: x.runtime or x.package == "validation", reverse=True
        )
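
    @staticmethod
    def _example_sort_key():
        """
        Illustrative sketch (not called anywhere): x.runtime is None for
        scripts without previous runtime data, so the key above falls back
        to a boolean. In the reversed sort, known runtimes rank by value,
        and among scripts without runtime data the validation package
        (True > False) comes first.
        """
        def key(runtime, package):
            return runtime or package == "validation"

        assert key(12.5, "tracking") == 12.5  # known runtime ranks by value
        assert key(None, "validation") is True  # validation package first
        assert key(None, "tracking") is False  # everything else last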
    def run_validation(self):
        """
        This method runs the actual validation, i.e. it loops over all
        scripts, checks which of them are ready for execution, and runs them.
        """
        # Use local job control for the plotting scripts
        self.log.note("Initializing local job control for plotting.")
        local_control = localcontrol.Local(
            max_number_of_processes=self.parallel
        )

        # Depending on the selected mode, pick the job control backend for
        # all other jobs
        self.log.note("Selecting job control for all other jobs.")
        selected_controls = [
            c for c in self.get_available_job_control() if c.name() == self.mode
        ]
        if not len(selected_controls) == 1:
            print(f"Selected mode {self.mode} does not exist")
            sys.exit(1)
        selected_control = selected_controls[0]

        self.log.note(
            "Controller: {} ({})".format(
                selected_control.name(), selected_control.description()
            )
        )

        if not selected_control.is_supported():
            print(f"Selected mode {self.mode} is not supported on your system")
            sys.exit(1)

        # Instantiate the selected job control backend
        if selected_control.name() == "local":
            control = selected_control(max_number_of_processes=self.parallel)
        else:
            control = selected_control()

        # Get the git hash of the repository used for this validation
        src_basepath = self.get_useable_basepath()
        git_hash = validationfunctions.get_compact_git_hash(src_basepath)
        self.log.debug(
            f"Git hash of repository located at {src_basepath} is {git_hash}"
        )

        # If runtime data from a previous run exists, read it and back it up
        if (
            os.path.exists("./runtimes.dat")
            and os.stat("./runtimes.dat").st_size
        ):
            self.set_runtime_data()
            if os.path.exists("./runtimes-old.dat"):
                # We keep only one backup of the old runtime data
                os.remove("./runtimes-old.dat")
            if self.mode == "local":
                shutil.copyfile("./runtimes.dat", "./runtimes-old.dat")

        # Open the runtimes log, but only in local mode
        if self.mode == "local":
            runtimes = open("./runtimes.dat", "w+")

        # This variable is needed for the progress bar function
        progress_bar_lines = 0
        if not self.quiet:
            print()

        # The list of scripts that have to be processed
        remaining_scripts = [
            script
            for script in self.scripts
            if script.status == ScriptStatus.waiting
        ]

        # Sort the list, execute slow scripts first
        self.sort_scripts(remaining_scripts)

        def handle_finished_script(script_obj: Script):
            # Write to log that the script finished
            self.log.debug("Finished: " + script_obj.path)

            # If we are running locally, log the script's runtime
            script_obj.runtime = time.time() - script_obj.start_time
            if self.mode == "local":
                runtimes.write(
                    script_obj.name + "=" + str(script_obj.runtime) + "\n"
                )

            # Check the return code and set the status accordingly.
            # `result` is the closure variable set in the while loop below.
            script_obj.status = ScriptStatus.finished
            script_obj.returncode = result[1]
            if result[1] != 0:
                script_obj.status = ScriptStatus.failed
                self.log.warning(
                    f"exit_status was {result[1]} for {script_obj.path}"
                )
                script_obj.remove_output_files()

                # Skip all dependent scripts
                self.skip_script(
                    script_obj,
                    reason="Script '{}' failed and we set its status to "
                    "skipped so that all dependencies are "
                    "also skipped.".format(script_obj.path),
                )
            else:
                # Remove this script from the dependencies of all dependent
                # script objects
                for dependent_script in remaining_scripts:
                    if script_obj in dependent_script.dependencies:
                        dependent_script.dependencies.remove(script_obj)

            # Some printout in quiet mode
            if self.quiet:
                waiting = [
                    script
                    for script in remaining_scripts
                    if script.status == ScriptStatus.waiting
                ]
                running = [
                    script
                    for script in remaining_scripts
                    if script.status == ScriptStatus.running
                ]
                print(
                    "Finished [{},{}]: {} -> {}".format(
                        len(waiting),
                        len(running),
                        script_obj.path,
                        script_obj.status,
                    )
                )

        def handle_unfinished_script(script_obj: Script):
            # Periodically report on scripts that are still running
            if (
                time.time() - script_obj.last_report_time
            ) / 60.0 > self.running_script_reporting_interval:
                print(
                    "Script {} running since {} seconds".format(
                        script_obj.name_not_sanitized,
                        time.time() - script_obj.start_time,
                    )
                )
                script_obj.last_report_time = time.time()

            # Check for the maximum time a script is allowed to run
            total_runtime_in_minutes = (
                time.time() - script_obj.start_time
            ) / 60.0
            if (
                total_runtime_in_minutes
                > self.script_max_runtime_in_minutes
                > 0
            ):
                script_obj.status = ScriptStatus.failed
                self.log.warning(
                    f"Script {script_obj.path} did not finish after "
                    f"{total_runtime_in_minutes} minutes, attempting to "
                    "terminate it."
                )
                script_obj.control.terminate(script_obj)
                # Skip all dependent scripts
                self.skip_script(
                    script_obj,
                    reason=f"Script '{script_obj.path}' did not finish in "
                    f"time, so we're setting it to 'failed' so that all "
                    f"dependent scripts will be skipped.",
                )

        def handle_waiting_script(script_obj: Script):
            # Determine the way of execution depending on whether
            # data files are created
            if script_obj.output_files:
                script_obj.control = control
            else:
                script_obj.control = local_control

            # Do not spawn processes if there are already too many
            if script_obj.control.available():
                # Write to log which script is being started
                self.log.debug("Starting " + script_obj.path)

                # Set the script status accordingly
                if script_obj.status == ScriptStatus.failed:
                    self.log.warning(f"Starting of {script_obj.path} failed")
                else:
                    script_obj.status = ScriptStatus.running

                # Actually start the script execution
                script_obj.control.execute(
                    script_obj, self.basf2_options, self.dry, self.tag
                )

                # Log the script execution start time
                script_obj.start_time = time.time()
                script_obj.last_report_time = time.time()

                # Some printout in quiet mode
                if self.quiet:
                    waiting = [
                        _
                        for _ in remaining_scripts
                        if _.status == ScriptStatus.waiting
                    ]
                    running = [
                        _
                        for _ in remaining_scripts
                        if _.status == ScriptStatus.running
                    ]
                    print(
                        "Started [{},{}]: {}".format(
                            len(waiting), len(running), script_obj.path
                        )
                    )

        # While there are scripts that have not yet been executed...
        while remaining_scripts:
            # Loop over all remaining Script objects
            for script_object in remaining_scripts:
                # If the script is currently running, check if it finished
                if script_object.status == ScriptStatus.running:
                    result = script_object.control.is_job_finished(
                        script_object
                    )
                    if result[0]:
                        handle_finished_script(script_object)
                    else:
                        handle_unfinished_script(script_object)

                # Otherwise, start it once all of its dependencies are met
                elif not script_object.dependencies:
                    handle_waiting_script(script_object)

            # Update the list of scripts that have to be processed
            remaining_scripts = [
                script
                for script in remaining_scripts
                if script.status
                in [ScriptStatus.waiting, ScriptStatus.running]
            ]

            # Sort them again, in case runtimes have been updated
            self.sort_scripts(remaining_scripts)

            # Wait for one second before checking again
            time.sleep(1)

            # If we are not running in quiet mode, draw the progress bar
            if not self.quiet:
                progress_bar_lines = draw_progress_bar(
                    progress_bar_lines, self.scripts
                )

        # Log failed and skipped scripts
        self.log_failed()
        self.log_skipped()

        # Close the runtime data file
        if self.mode == "local":
            runtimes.close()

        self.store_run_results_json(git_hash)
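
    @staticmethod
    def _example_ready_scripts(remaining: List[Script]) -> List[Script]:
        """
        Minimal sketch (not called anywhere) of the scheduling policy of
        run_validation(): a waiting script becomes ready once its dependency
        list is empty, and handle_finished_script() removes each finished
        script from the dependency lists of all remaining scripts.
        """
        return [
            script
            for script in remaining
            if script.status == ScriptStatus.waiting
            and not script.dependencies
        ]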
    def create_plots(self):
        """
        This method prepares the html directory for the plots if necessary
        and creates the plots that include the results from this validation.
        """
        html_folder = validationpath.get_html_folder(self.work_folder)
        results_folder = validationpath.get_results_folder(self.work_folder)

        os.makedirs(html_folder, exist_ok=True)

        if not os.path.exists(results_folder):
            self.log.error(
                f"Folder {results_folder} not found in "
                f"the work directory {self.work_folder}, please run "
                f"b2validation first"
            )

        validationplots.create_plots(force=True, work_folder=self.work_folder)
def execute(tag=None, is_test=None):
    """
    Parses the command line and executes the full validation suite.

    :param tag: The name that will be used for the current revision.
        Default None means automatic.
    :param is_test: Run in test mode? Default None means that we read this
        from the command line arguments (which default to False).
    """
    # Check that we are at a good starting point
    if (
        os.environ.get("BELLE2_RELEASE_DIR", None) is None
        and os.environ.get("BELLE2_LOCAL_DIR", None) is None
    ):
        sys.exit("Error: No basf2 release set up!")

    # Parse the command line arguments
    cmd_arguments = parse_cmd_line_arguments(
        modes=Validation.get_available_job_control_names()
    )

    # Overwrite the command line arguments with the function arguments
    if tag is not None:
        cmd_arguments.tag = tag
    if is_test is not None:
        cmd_arguments.test = is_test

    # Create the validation object
    validation = Validation(cmd_arguments.tag)

    try:
        # Write to log that the validation started
        validation.log.note("Starting validation...")
        validation.log.note(
            f'Results will be stored in a folder named "{validation.tag}"...'
        )
        validation.log.note(
            "The (full) log file(s) can be found at {}".format(
                ", ".join(get_log_file_paths(validation.log))
            )
        )
        validation.log.note(
            "Please check these logs when encountering "
            "unexpected results, as most of the warnings and "
            "errors are not written to stdout/stderr."
        )

        # Store the basf2 options if any were given
        if cmd_arguments.options:
            validation.basf2_options = " ".join(cmd_arguments.options)
            validation.log.note(
                f"Received arguments for basf2: {validation.basf2_options}"
            )

        # Set the mode (local or cluster)
        validation.mode = cmd_arguments.mode

        # Set the number of parallel processes
        validation.parallel = cmd_arguments.parallel

        # Quiet mode: no progress bar
        if cmd_arguments.quiet:
            validation.log.note("Running in quiet mode (no progress bar).")
            validation.quiet = True

        # Dry run: do not actually start any scripts
        if cmd_arguments.dry:
            validation.log.note(
                "Performing a dry run; no scripts will be started."
            )
            validation.dry = True

        # Test mode: run only the validation-test package
        if cmd_arguments.test:
            validation.log.note("Running in test mode")
            validation.ignored_packages = []
            cmd_arguments.packages = ["validation-test"]

        validation.log.note(
            "Release Folder: {}".format(validation.basepaths["central"])
        )
        validation.log.note(
            "Local Folder: {}".format(validation.basepaths["local"])
        )

        # Now collect the steering files for the requested intervals
        validation.log.note("Collecting steering files...")
        intervals = cmd_arguments.intervals.split(",")
        validation.collect_steering_files(IntervalSelector(intervals))

        # Build headers for every script object we have created
        validation.log.note("Building headers for Script objects...")
        validation.build_headers()

        # Build dependencies unless we are asked to ignore them
        if not cmd_arguments.select_ignore_dependencies:
            validation.log.note("Building dependencies for Script objects...")
            validation.build_dependencies()

        if cmd_arguments.packages:
            validation.log.note(
                "Applying package selection for the following package(s): "
                + ", ".join(cmd_arguments.packages)
            )
            validation.apply_package_selection(cmd_arguments.packages)

        # Select only specific scripts, if this option has been set
        if cmd_arguments.select:
            validation.log.note("Applying selection for validation scripts")
            validation.apply_script_selection(
                cmd_arguments.select, ignore_dependencies=False
            )

        # Select only specific scripts and ignore their dependencies
        if cmd_arguments.select_ignore_dependencies:
            validation.log.note(
                "Applying selection for validation scripts, "
                "ignoring their dependencies"
            )
            validation.apply_script_selection(
                cmd_arguments.select_ignore_dependencies,
                ignore_dependencies=True,
            )

        # Check if caching of script output is requested
        if cmd_arguments.use_cache:
            validation.log.note("Checking for cached script output")
            validation.apply_script_caching()

        # Allow changing the maximal run time of the scripts
        if cmd_arguments.max_run_time is not None:
            if cmd_arguments.max_run_time > 0:
                validation.log.note(
                    f"Setting maximal run time of the steering files "
                    f"to {cmd_arguments.max_run_time} minutes."
                )
            else:
                validation.log.note(
                    "Disabling run time limitation of steering files as "
                    "requested (max run time set to <= 0)."
                )
            validation.script_max_runtime_in_minutes = (
                cmd_arguments.max_run_time
            )

        # Start the validation
        validation.log.note("Starting the validation...")
        validation.run_validation()

        # Log that the validation has finished and create the plots
        validation.log.note("Validation finished...")
        if not validation.dry:
            validation.log.note("Start creating plots...")
            validation.create_plots()
            validation.log.note("Plots have been created...")

            # Send mails to the contact persons if requested
            if cmd_arguments.send_mails:
                mails = mail_log.Mails(validation)
                validation.log.note("Start sending mails...")
                if cmd_arguments.send_mails_mode == "incremental":
                    incremental = True
                elif cmd_arguments.send_mails_mode == "full":
                    incremental = False
                else:
                    incremental = None
                mails.send_all_mails(incremental=incremental)
                validation.log.note(
                    "Save mail data to {}".format(validation.get_log_folder())
                )
        else:
            validation.log.note(
                "Skipping plot creation and mailing (dry run)..."
            )

        validation.report_on_scripts()

        validation.log.note(
            "Validation finished! Total runtime: {}s".format(
                int(timeit.default_timer() - get_start_time())
            )
        )

        if cmd_arguments.view:
            # Start the local validation webserver and open the site
            validationserver.run_server(open_site=True)

    except KeyboardInterrupt:
        validation.log.note("Validation terminated by user!")