from basf2 import statistics
from ROOT import PyConfig
PyConfig.IgnoreCommandLineOptions = True

from typing import List

from validationscript import Script, ScriptStatus
from validationfunctions import get_start_time, get_validation_folders, \
    scripts_in_dir, parse_cmd_line_arguments, get_log_file_paths, \
    terminal_title_line
import validationfunctions
import validationserver
import validationplots
import validationscript

import clustercontrolsge
import clustercontroldrmaa

pp = pprint.PrettyPrinter(depth=6, indent=1, width=80)
    Add memory usage and execution time validation plots to the given root
    file. The current root file will be used if the fileName is empty
    (default).

    if not timing_methods:
        timing_methods = [statistics.INIT, statistics.EVENT]
    if not memory_methds:
        memory_methds = [statistics.INIT, statistics.EVENT]

    save_dir = ROOT.gDirectory

    plot_file = ROOT.TFile.Open(file_name, 'UPDATE')

    job_desc = sys.argv[1]
    h_global_timing = ROOT.TH1D(
        prefix + 'GlobalTiming',
    h_global_timing.SetStats(0)
    h_global_timing.GetXaxis().SetTitle('method')
    h_global_timing.GetYaxis().SetTitle('time/call [ms]')
    h_global_timing.GetListOfFunctions().Add(
        """The (average) time of the different basf2 execution phases
        for {}. The error bars show the rms of the time
        distributions.""".format(job_desc)
    h_global_timing.GetListOfFunctions().Add(
        """There should be no significant and persistent increases in
        the run time of the methods. Only cases where the increase
        compared to the reference or previous versions persists for at
        least two consecutive revisions should be reported since the
        measurements can be influenced by load from other processes on
        the execution host."""
    h_global_timing.GetListOfFunctions().Add(
        ROOT.TNamed('Contact', contact)
    )
    for (index, method) in statistics.StatisticCounters.values.items():
        method_name[method] = str(method)[0] \
            + str(method).lower()[1:].replace('_r', 'R')
        h_global_timing.SetBinContent(
            statistics.get_global().time_mean(method) * 1e-6
        h_global_timing.SetBinError(
            statistics.get_global().time_stddev(method) * 1e-6
        h_global_timing.GetXaxis().SetBinLabel(
    h_global_timing.Write()
    modules = statistics.modules
    h_module_timing = ROOT.TH1D(
        prefix + 'ModuleTiming',
        len(modules), 0, len(modules)
    h_module_timing.SetStats(0)
    h_module_timing.GetXaxis().SetTitle('module')
    h_module_timing.GetYaxis().SetTitle('time/call [ms]')
    h_module_timing.GetListOfFunctions().Add(
        """There should be no significant and persistent increases in
        the run time of a module. Only cases where the increase compared
        to the reference or previous versions persists for at least two
        consecutive revisions should be reported since the measurements
        can be influenced by load from other processes on the execution
        host."""
    h_module_timing.GetListOfFunctions().Add(ROOT.TNamed(
    for method in timing_methods:
        h_module_timing.SetTitle('Module %s Timing' % method_name[method])
        h_module_timing.GetListOfFunctions().Add(
            """The (average) execution time of the %s method of modules
            for %s. The error bars show the rms of the time
            distributions.""" %
            (method_name[method],
        for modstat in modules:
            h_module_timing.SetBinContent(
                index, modstat.time_mean(method) * 1e-6)
            h_module_timing.SetBinError(
                index, modstat.time_stddev(method) * 1e-6)
            h_module_timing.GetXaxis().SetBinLabel(
        h_module_timing.Write('%s%sTiming' % (prefix, method_name[method]))
        h_module_timing.GetListOfFunctions().RemoveLast()
    memory_profile = ROOT.Belle2.PyStoreObj('VirtualMemoryProfile', 1)
    memory_profile.obj().GetListOfFunctions().Add(ROOT.TNamed(
        f'The virtual memory usage vs. the event number for {job_desc}.')
    memory_profile.obj().GetListOfFunctions().Add(ROOT.TNamed(
        """The virtual memory usage should be flat for high event
        numbers. If it keeps rising this is an indication of a memory
        leak.<br>There should also be no significant increases with
        respect to the reference (or previous revisions if no reference
        exists).""")
    memory_profile.obj().GetListOfFunctions().Add(ROOT.TNamed(
    memory_profile.obj().Write(prefix + 'VirtualMemoryProfile')

    memory_profile = ROOT.Belle2.PyStoreObj('RssMemoryProfile', 1)
    memory_profile.obj().GetListOfFunctions().Add(ROOT.TNamed(
        f'The rss memory usage vs. the event number for {job_desc}.')
    memory_profile.obj().GetListOfFunctions().Add(ROOT.TNamed(
        """The rss memory usage should be flat for high event numbers.
        If it keeps rising this is an indication of a memory
        leak.<br>There should also be no significant increases with
        respect to the reference (or previous revisions if no reference
        exists). In the (rare) case that memory is swapped by the OS,
        the rss memory usage can decrease.""")
    memory_profile.obj().GetListOfFunctions().Add(ROOT.TNamed(
    memory_profile.obj().Write(prefix + 'RssMemoryProfile')
    sqrt_n = 1 / math.sqrt(statistics.get_global().calls() - 1)
    h_module_memory = ROOT.TH1D(
        prefix + 'ModuleMemory',
        'Virtual Module Memory',
        len(modules), 0, len(modules)
    )
    h_module_memory.SetStats(0)
    h_module_memory.GetXaxis().SetTitle('module')
    h_module_memory.GetYaxis().SetTitle('memory increase/call [kB]')
    h_module_memory.GetListOfFunctions().Add(ROOT.TNamed(
        f'The (average) increase in virtual memory usage per call of the '
        f'{method_name[method]} method of modules for {job_desc}.')
    h_module_memory.GetListOfFunctions().Add(ROOT.TNamed(
        'The increase in virtual memory usage per call for each module '
        'should be consistent with zero or the reference.')
    h_module_memory.GetListOfFunctions().Add(ROOT.TNamed(
    for method in memory_methds:
        h_module_memory.SetTitle('Module %s Memory' % method_name[method])
        for modstat in modules:
            h_module_memory.SetBinContent(index, modstat.memory_mean(method))
            h_module_memory.SetBinError(
                modstat.memory_stddev(method) * sqrt_n
            h_module_memory.GetXaxis().SetBinLabel(index, modstat.name)
        h_module_memory.Write('%s%sMemory' % (prefix, method_name[method]))
        h_module_memory.GetListOfFunctions().RemoveLast()
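    # Naming sketch for the histograms written above (the prefix value is
    # hypothetical): method_name maps e.g. statistics.INIT -> 'Init' and
    # statistics.EVENT -> 'Event' (the '_r' -> 'R' replacement handles the
    # run-transition counters), so with prefix='EvtGenSim' the loop writes
    # keys such as 'EvtGenSimInitMemory' and 'EvtGenSimEventMemory'.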

def event_timing_plot(

    Add a validation histogram of event execution time to the given root file.
    The current root file will be used if the fileName is empty (default).
    The data file has to contain the profile information created by the Profile
    module.

    job_desc = os.path.basename(sys.argv[0])

    save_dir = ROOT.gDirectory
    data = ROOT.TFile.Open(data_file)
    tree = data.Get("tree")
    entries = tree.GetEntries()
    tree.Draw('Entry$>>hEventTime(%d,-0.5,%d.5)' % (entries, entries - 1),
              'ProfileInfo.m_timeInSec', 'goff')
    h_event_time = data.Get("hEventTime")
    h_event_time.SetDirectory(0)

    plot_file = ROOT.TFile.Open(file_name, 'UPDATE')

    stat = ROOT.gStyle.GetOptStat()
    ROOT.gStyle.SetOptStat(101110)
    h_timing = ROOT.TH1D(prefix + 'Timing', 'Event Timing', 100, 0, max_time)
    h_timing.UseCurrentStyle()
    h_timing.GetXaxis().SetTitle('time [s]')
    h_timing.GetYaxis().SetTitle('events')
    h_timing.GetListOfFunctions().Add(ROOT.TNamed(
        f'The distribution of event execution times for {job_desc}.')
    h_timing.GetListOfFunctions().Add(ROOT.TNamed(
        'The distribution should be consistent with the reference (or '
        'previous revisions if no reference exists).')
    h_timing.GetListOfFunctions().Add(ROOT.TNamed('Contact', contact))
    for event in range(1 + burn_in, entries + 1):
            h_event_time.GetBinContent(event) -
            h_event_time.GetBinContent(event - 1)
    ROOT.gStyle.SetOptStat(stat)
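
# Usage sketch for event_timing_plot (the full signature is not shown above;
# parameter names are taken from how they are used in the body and all values
# are hypothetical):
#   event_timing_plot(data_file='EvtGenSimProfile.root',
#                     file_name='validation_statistics.root',
#                     prefix='EvtGenSim')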

def draw_progress_bar(delete_lines: int, scripts: List[Script], barlength=50):
    This function plots a progress bar of the validation, i.e. it shows which
    percentage of the scripts have been executed so far.
    It also shows which scripts are currently running, as well as
    the total runtime of the validation.

    @param delete_lines: The number of lines that need to be deleted before
        we can redraw the progress bar
    @param scripts: List of all Script objects
    @param barlength: The length of the progress bar (in characters)
    @return: The number of lines that were printed by this function call.
        Useful if this function is called repeatedly.
    finished_scripts = len([
    all_scripts = len(scripts)
    percent = 100.0 * finished_scripts / all_scripts

    runtime = int(timeit.default_timer() - get_start_time())

    for i in range(delete_lines):
        print("\x1b[2K \x1b[1A", end=' ')

    for i in range(barlength):
        if i < int(barlength * percent / 100.0):

        f'\x1b[0G[{progressbar}] {percent:6.1f}% '
        f'({finished_scripts}/{all_scripts})')
    print(f'Runtime: {runtime}s')

    running = [os.path.basename(__.path) for __ in scripts

    print(f'Running: {running[0]}')
    for __ in running[1:]:
        print('{0} {1}'.format(len('Running:') * " ", __))

    return len(running) + 2
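
# Usage sketch: call this repeatedly from the main polling loop and feed the
# returned line count back in, so the previously printed lines get erased
# before redrawing (run_validation() below does this via progress_bar_lines).
#   lines = 0
#   while validation_still_running():    # hypothetical condition
#       lines = draw_progress_bar(lines, scripts)
#       time.sleep(0.5)                   # hypothetical polling interval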

    This can be used to parse the execution intervals of validation scripts
    and can check whether a script object is in the list of intervals
    configured in this class.

        Initializes the IntervalSelector class with a list of intervals which

        Checks whether the interval listed in a script object's header is

        script_interval = "nightly"

        if script_object.header is not None:
            if "interval" in script_object.header:
                script_interval = script_object.header["interval"]

    This is the class that provides all global variables, like 'list_of_files'
    etc. There is only one instance of this class with name 'validation'. This
    allows us to use some kind of namespace, i.e. global variables will always
    be referenced as validation.[name of variable]. This makes it easier to
    distinguish them from local variables that only exist within the scope of
    a function.

    @var tag: The name of the folder within the results directory
    @var log: Reference to the logging object for this validation instance
    @var basepaths: The paths to the local and central release directory
    @var scripts: List of all Script objects for steering files
    @var packages: List of all packages which contributed scripts
    @var basf2_options: The options to be given to the basf2 command
    @var mode: Whether to run locally or on a cluster
    @var quiet: No progress bar in quiet mode
    @var dry: Dry runs do not actually start any scripts (for debugging)

        The default constructor. Initializes all those variables that will be
        globally accessible later on. Does not return anything.
        self.work_folder = os.path.abspath(os.getcwd())

        self.ignored_packages = ["validation-test"]

        self.basf2_options = ''

        self.ignore_dependencies = False

        self.running_script_reporting_interval = 30

        self.script_max_runtime_in_minutes = 60 * 5
    def get_useable_basepath(self):
        Checks if a local path is available. If only a central release is
        available, return the path to this central release.

        if self.basepaths["local"]:
            return self.basepaths["local"]
        return self.basepaths["central"]

    @staticmethod
    def get_available_job_control():
        Insert the possible backend controls; they will be checked via their
        is_supported method to see whether they can actually be executed in
        the current environment.

        return [localcontrol.Local,

    @staticmethod
    def get_available_job_control_names():
        return [c.name() for c in Validation.get_available_job_control()]
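    # Sketch: these names are what the command line "mode" option accepts;
    # execute() below passes them to parse_cmd_line_arguments(modes=...).
    #   Validation.get_available_job_control_names()  # e.g. ['local', ...]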

    def build_dependencies(self):
        This method loops over all Script objects in self.scripts and
        calls their compute_dependencies() method.

        for script_object in self.scripts:
            script_object.compute_dependencies(self.scripts)

        for script_object in self.scripts:
                    reason="Depends on '{}'".format(script_object.path)

    def build_headers(self):
        This method loops over all Script objects in self.scripts and
        calls their load_header() method.

        for script_object in self.scripts:
            script_object.load_header()
    def skip_script(self, script_object, reason=""):
        This method sets the status of the given script and all dependent ones
        to 'skipped'.

        @param script_object: Script object to be skipped.
        @param reason: Reason for skipping the object

        self.log.warning('Skipping ' + script_object.path)
        self.log.debug("Reason for skipping: {}.".format(reason))

        for dependent_script in self.scripts:
            if script_object in dependent_script.dependencies:
                    reason="Depends on '{}'".format(script_object.path)
    def create_log(self):
        We use the logging module to create an object which allows us to
        comfortably log everything that happens during the execution of
        this script and even have different levels of importance, such as

        self.log = logging.getLogger('validate_basf2')
        self.log.setLevel(logging.DEBUG)

        logging.addLevelName(logging.NOTE, 'NOTE')
        self.log.note = lambda msg, *args: self.log._log(logging.NOTE,

        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.NOTE)

        console_format = logging.Formatter('%(message)s')
        console_handler.setFormatter(console_format)

        self.log.addHandler(console_handler)

        log_dir = self.get_log_folder()
        if not os.path.exists(log_dir):
            print("Creating " + log_dir)

        file_handler = logging.FileHandler(
            os.path.join(log_dir, 'validate_basf2.log'),
        file_handler.setLevel(logging.DEBUG)

        file_format = logging.Formatter('%(asctime)s - %(module)s - '
                                        '%(levelname)s - %(message)s',
                                        datefmt='%Y-%m-%d %H:%M:%S')
        file_handler.setFormatter(file_format)

        self.log.addHandler(file_handler)
    def collect_steering_files(self, interval_selector):
        This function will collect all steering files from the local and
        central release directory.

        validation_folders = get_validation_folders(
            'local', self.basepaths, self.log)

        for (package, folder) in get_validation_folders(
                'central', self.basepaths, self.log).items():
            if package not in validation_folders:
                validation_folders[package] = folder

        for ignored in self.ignored_packages:
            if ignored in validation_folders:
                del validation_folders[ignored]

        self.packages = list(validation_folders.keys())

        for (package, folder) in validation_folders.items():
            c_files = scripts_in_dir(folder, self.log, '.C')
            py_files = scripts_in_dir(folder, self.log, '.py')
            for steering_file in c_files + py_files:
                script = Script(steering_file, package, self.log)
                if interval_selector.in_interval(script):
                    self.scripts.append(script)
    def get_log_folder(self):
        Get the log folder for this validation run. The command log
        files of the (successful, failed) scripts will be recorded there.
    def log_failed(self):
        This method logs all scripts with property failed into a single file
        to be read in run_validation_server.py

        failed_log_path = os.path.join(
            self.get_log_folder(),
            "list_of_failed_scripts.log"
        )

        self.log.note(f"Writing list of failed scripts to {failed_log_path}.")

            script
            for script in self.scripts

        with open(failed_log_path, "w+") as list_failed:
            for script in failed_scripts:
                list_failed.write(script.path.split("/")[-1] + "\n")
    def log_skipped(self):
        This method logs all scripts with property skipped into a single file
        to be read in run_validation_server.py

        skipped_log_path = os.path.join(
            self.get_log_folder(),
            "list_of_skipped_scripts.log"
        )

            f"Writing list of skipped scripts to {skipped_log_path}."

            script
            for script in self.scripts

        with open(skipped_log_path, "w+") as list_skipped:
            for script in skipped_scripts:
                list_skipped.write(script.path.split("/")[-1] + "\n")
    def report_on_scripts(self):
        Print a summary about all scripts, especially highlighting
        skipped and failed scripts.

            script.package + "/" + script.name
            for script in self.scripts

            script.package + "/" + script.name
            for script in self.scripts

        self.log.note(terminal_title_line(
            "Summary of script execution", level=0
        ))
        self.log.note(f"Total number of scripts: {len(self.scripts)}")

            self.log.note("{}/{} scripts were skipped".format(
                len(skipped_scripts), len(self.scripts)))
            for s in skipped_scripts:
                self.log.note("* {}".format(s))
            self.log.note("No scripts were skipped. Nice!")

            self.log.note("{}/{} scripts failed".format(
                len(failed_scripts), len(self.scripts)))
            for s in failed_scripts:
                self.log.note(f"* {s}")
            self.log.note("No scripts failed. Nice!")

            total=len(self.scripts),
            failure=len(failed_scripts) + len(skipped_scripts)
    def set_runtime_data(self):
        This method sets the runtime property of each script.

        with open(path, "r") as runtimes:
            for line in runtimes:
                run_times[line.split("=")[0].strip()] = \
                    line.split("=")[1].strip()

        for script in self.scripts:
            script.runtime = float(run_times[script.name])

                for dict_key in run_times:
                    suma += float(run_times[dict_key])
                script.runtime = suma / len(run_times)

    def get_script_by_name(self, name):
        l = [s for s in self.scripts if s.name == name]
    def apply_package_selection(self, selected_packages,
                                ignore_dependencies=False):
        Only select scripts from a specific set of packages, but still
        honor the dependencies on outside scripts which may exist.

        to_keep_dependencies = set()

        if not ignore_dependencies:
            for script_obj in self.scripts:
                if script_obj.package in selected_packages:
                    for dep in script_obj.dependencies:
                        to_keep_dependencies.add(dep.unique_name())

            s for s in self.scripts
            if (s.package in selected_packages)
            or (s.unique_name() in to_keep_dependencies)
        ]

        packages = set(s.package for s in self.scripts)
        packages_not_found = list(set(selected_packages) - packages)
        if packages_not_found:
            msg = "You asked to select the package(s) {}, but they were not " \
                  "found.".format(', '.join(packages_not_found))
            self.log.warning(msg)
    def apply_script_selection(self, script_selection,
                               ignore_dependencies=False):
        This method will take the validation file name (e.g.
        "FullTrackingValidation.py"), determine all the scripts it depends on
        and set the status of these scripts to "waiting". The status of all
        other scripts will be set to "skipped", which means they will not be
        executed in the validation run. If ignore_dependencies is True,
        dependencies will also be set to "skipped".

            Script.sanitize_file_name(s)
            for s in script_selection

        scripts_to_enable = set()

        for script in script_selection:
            scripts_to_enable.add(script)
            script_obj = self.get_script_by_name(script)

            if script_obj is None:
                    f"Script with name {script} cannot be found, skipping for "

            others = script_obj.get_recursive_dependencies(self.scripts)
            if not ignore_dependencies:
                scripts_to_enable = scripts_to_enable.union(others)

        for script_obj in self.scripts:
            if script_obj.name in scripts_to_enable:
                    f"Enabling script {script_obj.name} because it was "
                    f"selected or a selected script depends on it."
                    f"Disabling script {script_obj.name} because it was "

            Script.sanitize_file_name(s.name)
            for s in self.scripts

        scripts_not_found = set(script_selection) - script_names
        if scripts_not_found:
            msg = "You requested script(s) {}, but they could not be " \
                  "found.".format(", ".join(scripts_not_found))
            self.log.warning(msg)
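        # Usage sketch (hypothetical call): keep only FullTrackingValidation.py
        # plus everything it depends on; all other scripts are set to "skipped".
        #   validation.apply_script_selection(
        #       ["FullTrackingValidation.py"], ignore_dependencies=False
        #   )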

    def apply_script_caching(self):
        cacheable_scripts = [s for s in self.scripts if s.is_cacheable()]

            self.work_folder, self.tag)

        for s in cacheable_scripts:
            outfiles = s.get_output_files()
                full_path = os.path.join(output_dir_datafiles, of)
                files_exist = files_exist and os.path.isfile(full_path)

        for script in self.scripts:
            for dep_script in script.dependencies:
                    script.dependencies.remove(dep_script)
    def store_run_results_json(self, git_hash):
        for p in self.packages:
            this_package_scripts = [
                s for s in self.scripts if s.package == p
            ]
            json_scripts = [s.to_json(self.tag) for s in this_package_scripts]

                scriptfiles=json_scripts,
                fail_count=fail_count)

            creation_date=datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
            packages=json_package,

            self.work_folder, self.tag
    def add_script(self, script):
        Explicitly add a script object. In normal operation, scripts are
        auto-discovered, but this method is useful for testing.

        self.scripts.append(script)
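        # Testing sketch (hypothetical path and package name): inject a
        # hand-built Script instead of relying on auto-discovery.
        #   val = Validation("test_tag")
        #   val.add_script(Script("somepackage/validation/my_check.py",
        #                         "somepackage", val.log))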

    @staticmethod
    def sort_scripts(script_list):
        Sort the list of scripts that have to be processed by runtime,
        executing slow scripts first. If no runtime information is available
        from the last execution, run the scripts in the validation package
        first because they are long running and used as input for other
        scripts.

            key=lambda x: x.runtime or x.package == "validation",

    def run_validation(self):
        This method runs the actual validation, i.e. it loops over all
        scripts, checks which of them are ready for execution, and runs them.

        self.log.note("Initializing local job control for plotting.")
        local_control = localcontrol.Local(
            max_number_of_processes=self.parallel)

        self.log.note("Selecting job control for all other jobs.")
        selected_controls = [
            c for c in self.get_available_job_control()
            if c.name() == self.mode
        ]

        if not len(selected_controls) == 1:
            print(f"Selected mode {self.mode} does not exist")

        selected_control = selected_controls[0]

        self.log.note("Controller: {} ({})".format(
            selected_control.name(),
            selected_control.description()
        ))

        if not selected_control.is_supported():
            print(f"Selected mode {self.mode} is not supported on your system")
        if selected_control.name() == "local":
            control = selected_control(max_number_of_processes=self.parallel)
        else:
            control = selected_control()
        src_basepath = self.get_useable_basepath()
            f"Git hash of repository located at {src_basepath} is {git_hash}"

        if os.path.exists("./runtimes.dat") and \
                os.stat("./runtimes.dat").st_size:
            self.set_runtime_data()
            if os.path.exists("./runtimes-old.dat"):
                os.remove("./runtimes-old.dat")
            if self.mode == "local":
                shutil.copyfile("./runtimes.dat", "./runtimes-old.dat")

        if self.mode == "local":
            runtimes = open('./runtimes.dat', 'w+')

        progress_bar_lines = 0

        remaining_scripts = [script for script in self.scripts
        self.sort_scripts(remaining_scripts)
        def handle_finished_script(script_obj: Script):
            self.log.debug('Finished: ' + script_obj.path)
            script_obj.runtime = time.time() - script_obj.start_time
            if self.mode == "local":
                runtimes.write(script_obj.name + "=" +
                               str(script_obj.runtime) + "\n")

            script_obj.returncode = result[1]
                    f'exit_status was {result[1]} for {script_obj.path}'
                    reason="Script '{}' failed and we set its status to "
                           "skipped so that all dependencies are "
                           "also skipped.".format(script_object.path)

            for dependent_script in remaining_scripts:
                if script_obj in dependent_script.dependencies:
                    dependent_script.dependencies.remove(script_obj)

            waiting = [script for script in remaining_scripts
            running = [script for script in remaining_scripts
                'Finished [{0},{1}]: {2} -> {3}'.format(
        def handle_unfinished_script(script_obj: Script):
            if (time.time() - script_obj.last_report_time) / 60.0 > \
                    self.running_script_reporting_interval:
                    "Script {} running for {} seconds".format(
                        script_obj.name_not_sanitized,
                        time.time() - script_obj.start_time))
                script_obj.last_report_time = time.time()

            total_runtime_in_minutes = \
                (time.time() - script_obj.start_time) / 60.0
            if total_runtime_in_minutes > self.script_max_runtime_in_minutes > 0:
                    f'Script {script_obj.path} did not finish after '
                    f'{total_runtime_in_minutes} minutes, attempting to '
                script_obj.control.terminate(script_obj)
                    reason=f"Script '{script_object.path}' did not finish in "
                           f"time, so we're setting it to 'failed' so that all "
                           f"dependent scripts will be skipped."
        def handle_waiting_script(script_obj: Script):
            if script_obj.header and \
                    script_obj.header.get('output', []):
                script_obj.control = control
            else:
                script_obj.control = local_control

            if script_obj.control.available():
                self.log.debug('Starting ' + script_obj.path)
                        f'Starting of {script_obj.path} failed'
                script_obj.control.execute(
                script_obj.start_time = time.time()
                script_obj.last_report_time = time.time()

                waiting = [_ for _ in remaining_scripts
                running = [_ for _ in remaining_scripts
                print('Started [{0},{1}]: {2}'.format(
                    len(waiting), len(running),
        while remaining_scripts:
            for script_object in remaining_scripts:
                    result = script_object.control.\
                        is_job_finished(script_object)
                        handle_finished_script(script_object)
                        handle_unfinished_script(script_object)
                elif not script_object.dependencies:
                    handle_waiting_script(script_object)

            remaining_scripts = [
                script for script in remaining_scripts
            self.sort_scripts(remaining_scripts)

                progress_bar_lines = draw_progress_bar(

        if self.mode == "local":

        self.store_run_results_json(git_hash)

    def create_plots(self):
        This method prepares the html directory for the plots if necessary
        and creates the plots that include the results from this validation.

        os.makedirs(html_folder, exist_ok=True)

        if not os.path.exists(results_folder):
                f"Folder {results_folder} not found in "
                f"the work directory {self.work_folder}, please run "
                f"b2validation first"

def execute(tag=None, is_test=None):
    Parses the command line and executes the full validation suite.

    :param tag: The name that will be used for the current revision.
        Default None means automatic.
    :param is_test: Run in test mode? Default None means that we read this
        from the command line arguments (which default to False).
    if os.environ.get('BELLE2_RELEASE_DIR', None) is None \
            and os.environ.get('BELLE2_LOCAL_DIR', None) is None:
        sys.exit('Error: No basf2 release set up!')
    cmd_arguments = parse_cmd_line_arguments(
        modes=Validation.get_available_job_control_names()
    )

    if tag is not None:
        cmd_arguments.tag = tag
    if is_test is not None:
        cmd_arguments.test = is_test

    validation = Validation(cmd_arguments.tag)

    validation.log.note('Starting validation...')
    validation.log.note(
        f'Results will be stored in a folder named "{validation.tag}"...')
    validation.log.note('The (full) log file(s) can be found at {}'.format(
        ', '.join(get_log_file_paths(validation.log))
    ))
    validation.log.note("Please check these logs when encountering "
                        "unexpected results, as most of the warnings and "
                        "errors are not written to stdout/stderr.")
    if cmd_arguments.options:
        validation.basf2_options = ' '.join(cmd_arguments.options)
        validation.log.note(
            f'Received arguments for basf2: {validation.basf2_options}'
        )

    validation.mode = cmd_arguments.mode

    validation.parallel = cmd_arguments.parallel

    if cmd_arguments.quiet:
        validation.log.note("Running in quiet mode (no progress bar).")
        validation.quiet = True

    if cmd_arguments.dry:
        validation.log.note("Performing a dry run; no scripts will be "
        validation.dry = True

    if cmd_arguments.test:
        validation.log.note('Running in test mode')
        validation.ignored_packages = []
        cmd_arguments.packages = ["validation-test"]
    validation.log.note(
        "Release Folder: {}".format(validation.basepaths["central"])
    )
    validation.log.note(
        "Local Folder: {}".format(validation.basepaths["local"])
    )

    validation.log.note('Collecting steering files...')
    intervals = cmd_arguments.intervals.split(",")

    validation.log.note('Building headers for Script objects...')
    validation.build_headers()

    if not cmd_arguments.select_ignore_dependencies:
        validation.log.note('Building dependencies for Script objects...')
        validation.build_dependencies()

    if cmd_arguments.packages:
        validation.log.note(
            "Applying package selection for the following package(s): " +
            ", ".join(cmd_arguments.packages)
        )
        validation.apply_package_selection(cmd_arguments.packages)

    if cmd_arguments.select:
        validation.log.note("Applying selection for validation scripts")
        validation.apply_script_selection(cmd_arguments.select,
                                          ignore_dependencies=False)

    if cmd_arguments.select_ignore_dependencies:
        validation.log.note("Applying selection for validation scripts, "
                            "ignoring their dependencies")
        validation.apply_script_selection(
            cmd_arguments.select_ignore_dependencies,
            ignore_dependencies=True
        )

    if cmd_arguments.use_cache:
        validation.log.note("Checking for cached script output")
        validation.apply_script_caching()
    if cmd_arguments.max_run_time is not None:
        if cmd_arguments.max_run_time > 0:
            validation.log.note(
                f"Setting maximal run time of the steering files "
                f"to {cmd_arguments.max_run_time} minutes."
            )
        else:
            validation.log.note(
                "Disabling run time limitation of steering files as "
                "requested (max run time set to <= 0)."
            )
        validation.script_max_runtime_in_minutes = \
            cmd_arguments.max_run_time
    validation.log.note('Starting the validation...')
    validation.run_validation()

    validation.log.note('Validation finished...')
    if not validation.dry:
        validation.log.note('Start creating plots...')
        validation.create_plots()
        validation.log.note('Plots have been created...')

        if cmd_arguments.send_mails:
            mails = mail_log.Mails(validation)
            validation.log.note('Start sending mails...')
            if cmd_arguments.send_mails_mode == "incremental":
            elif cmd_arguments.send_mails_mode == "full":
            mails.send_all_mails(
                incremental=incremental
            )
            validation.log.note(
                'Save mail data to {}'.format(
                    validation.get_log_folder()
                )
            )
    else:
        validation.log.note('Skipping plot creation and mailing '

    validation.report_on_scripts()

    validation.log.note(
        'Validation finished! Total runtime: {0}s'.format(
            int(timeit.default_timer() - get_start_time())
        )
    )

    if cmd_arguments.view:

    except KeyboardInterrupt:
        validation.log.note('Validation terminated by user!')