#!/usr/bin/env python3

# Belle II Software development
# validationplots.py
11# Normal imports
12import glob
13import os
14import re
15import sys
16import queue
17from typing import Dict, Any, List, Union, Optional
18import collections
19from multiprocessing import Queue
20
21# Load ROOT
22import ROOT
23
24# In case some ROOT files loaded by the validation scripts contain some
25# RooFit objects, ROOT will auto-load RooFit. Due to some (yet not
26# understood) tear down problem, this results in this error:
27# Fatal in <TClass::SetUnloaded>: The TClass for map<TString,double> is being
28# unloaded when in state 3 To prevent this, we are loading RooFit here
29# before ROOT has a chance to do this
30from ROOT import RooFit # noqa
31
32# The pretty printer. Print prettier :)
33import pprint
34import json_objects
35
36from basf2 import B2ERROR
37import validationpath
38from validationplotuple import Plotuple
39from validationfunctions import (
40 index_from_revision,
41 get_style,
42 available_revisions,
43 terminal_title_line,
44)
45import validationfunctions
46
47from validationrootobject import RootObject
48
49
# Refuse to run unless a basf2 release (central or local) is set up:
# without one, none of the validation paths below can be resolved.
if (
    os.environ.get("BELLE2_RELEASE_DIR") is None
    and os.environ.get("BELLE2_LOCAL_DIR") is None
):
    sys.exit("Error: No basf2 release set up!")

# Shared pretty printer for debug output
pp = pprint.PrettyPrinter(depth=6, indent=1, width=80)
58
59
60
63
64
def date_from_revision(
    revision: str, work_folder: str
) -> Optional[Union[int, float]]:
    """
    Takes the name of a revision and returns the 'last modified'-timestamp of
    the corresponding directory, which holds the revision.
    :param revision: A string containing the name of a revision
    :param work_folder: Folder that contains the results/ directory
    :return: The 'last modified'-timestamp of the folder which holds the
        revision (or None if the revision is unknown)
    """

    # The reference revision has no associated results folder: there is
    # always exactly one version of it, presumed to be the latest, so it
    # simply gets a constant timestamp.
    if revision == "reference":
        return 0

    # Regular releases and builds do have a well defined
    # 'last modified'-date, taken from their results folder.
    known_revisions = os.listdir(validationpath.get_results_folder(work_folder))
    if revision not in known_revisions:
        # Unknown revision -> no timestamp available
        return None

    return os.path.getmtime(
        validationpath.get_results_tag_folder(work_folder, revision)
    )
91
92
def merge_nested_list_dicts(a, b):
    """ Given two nested dictionaries of the same depth that contain lists,
    return a 'merged' dictionary that contains the joined lists.

    NOTE: only the top level of ``a``/``b`` is copied (shallow copy); nested
    dictionaries and lists are shared with (and extended in) the inputs.

    :param a: Dict[Dict[...[Dict[List]]..]]
    :param b: Dict[Dict[...[Dict[List]]..]] (same depth as a)
    :return: merged dictionary
    """

    def _merge(target, source):
        """ Merge source into target in place, return target. """
        for key, value in source.items():
            if key not in target:
                # New key: simply take it over from source
                target[key] = value
                continue
            if isinstance(target[key], dict) and isinstance(value, dict):
                # Both sides are dicts -> recurse one level deeper
                _merge(target[key], value)
            else:
                # At the innermost level both sides must hold lists
                assert isinstance(target[key], list)
                assert isinstance(value, list)
                target[key].extend(value)
        return target

    return _merge(a.copy(), b.copy())
116
117
def get_plot_files(
    revisions: List[str], work_folder: str
) -> Dict[str, Dict[str, List[str]]]:
    """
    Returns all plot files as absolute paths. For this purpose, it loops
    over all revisions in 'revisions', finds the corresponding results
    folder and collects the plot ROOT files.
    :param revisions: Name of the revisions.
    :param work_folder: Folder that contains the results/ directory
    :return: plot files, i.e. plot ROOT files from the
        requested revisions as dictionary
        {revision: {package: [root files]}}
    """
    # {revision: {package: [absolute paths of plot ROOT files]}}
    plot_files = collections.defaultdict(lambda: collections.defaultdict(list))

    results_folder = validationpath.get_results_folder(work_folder)

    for revision in revisions:

        # Reference files live with the package sources, not in results/,
        # so they need dedicated handling.
        if revision == "reference":
            plot_files["reference"] = collections.defaultdict(
                list, get_tracked_reference_files()
            )
            continue

        revision_folder = os.path.join(results_folder, revision)
        if not os.path.isdir(revision_folder):
            # No results for this revision -> nothing to collect
            continue

        for package in os.listdir(revision_folder):
            package_folder = os.path.join(revision_folder, package)
            # All ROOT files within this package, stored as absolute paths
            plot_files[revision][package].extend(
                os.path.abspath(rootfile)
                for rootfile in glob.glob(package_folder + "/*.root")
            )

    return plot_files
162
163
def get_tracked_reference_files() -> Dict[str, List[str]]:
    """
    This function loops over the local and central release dir and collects
    the .root-files from the validation-subfolders of the packages. These are
    the files which we will use as references.
    From the central release directory, we collect the files from the release
    which is set up on the machine running this script.
    :return: ROOT files that are located
        in the same folder as the steering files of the package as
        {package: [list of root files]}
    """

    # The base paths to the local and central release directories
    basepaths = {
        "local": os.environ.get("BELLE2_LOCAL_DIR", None),
        "central": os.environ.get("BELLE2_RELEASE_DIR", None),
    }

    # This is where we store the paths of reference ROOT files we've found
    results = {
        "local": collections.defaultdict(list),
        "central": collections.defaultdict(list),
    }

    # validation folder name used by the packages to keep the validation
    # reference plots
    validation_folder_name = "validation"
    validation_test_folder_name = "validation-test"

    # Now collect both local and central ROOT files:
    for location in ["local", "central"]:

        # Skip folders that do not exist (e.g. the central release dir might
        # not be setup if one is working with a completely local version)
        if basepaths[location] is None:
            continue

        # list all available packages
        root = basepaths[location]

        for package in os.listdir(root):
            # searches for a validation folder in any top-most folder (package
            # folders) and lists all root-files within
            glob_search = os.path.join(
                root, package, validation_folder_name, "*.root"
            )
            results[location][package].extend(
                os.path.abspath(f)
                for f in glob.glob(glob_search)
                if os.path.isfile(f)
            )
            # Special case: The validation-test folder in the validation
            # package which is used as a quick test of this framework.
            if package == "validation":
                glob_search = os.path.join(
                    root, package, validation_test_folder_name, "*.root"
                )
                results[location][validation_test_folder_name].extend(
                    os.path.abspath(f)
                    for f in glob.glob(glob_search)
                    if os.path.isfile(f)
                )

    # Now we need to get rid of all the duplicates: Since local > central,
    # we will delete all central reference files that have a local
    # counterpart.
    # Fix: the previous implementation removed elements from
    # results["central"][package] while iterating over that same list, which
    # can silently skip entries. We filter into a new list instead.
    for package, local_files in results["local"].items():
        # Reduce each local path to /[package]/[filename] for comparison
        local_relative_paths = {
            local_file.replace(basepaths["local"], "")
            for local_file in local_files
        }
        # Keep only those central files without a local counterpart
        results["central"][package] = [
            central_file
            for central_file in results["central"][package]
            if central_file.replace(basepaths["central"], "")
            not in local_relative_paths
        ]

    # Return both local and central reference files. The return value does
    # not maintain the distinction between local and central files, because
    # we stored the absolute path to the reference files, and local and
    # central reference files are treated the same anyway.
    # Fix: the previous implementation built the key list from
    # results["central"] twice, dropping packages that only exist locally.
    all_packages = set(results["central"]) | set(results["local"])
    return {
        package: results["central"][package] + results["local"][package]
        for package in all_packages
    }
262
263
def generate_new_plots(
    revisions: List[str],
    work_folder: str,
    process_queue: Optional[Queue] = None,
    root_error_ignore_level=ROOT.kWarning,
) -> None:
    """
    Creates the plots that contain the requested revisions. Each plot (or
    n-tuple, for that matter) is stored in an object of class Plot.
    @param revisions: Names of the revisions to plot. The first entry is
        treated as the reference revision.
    @param work_folder: Folder containing results
    @param process_queue: communication queue object, which is used in
        multi-processing mode to report the progress of the plot creating.
    @param root_error_ignore_level: Value for gErrorIgnoreLevel. Default:
        ROOT.kWarning. If set to None, global level will be left unchanged.
    @return: No return value
    """

    print(
        terminal_title_line(
            "Creating plots for the revision(s) " + ", ".join(revisions) + "."
        )
    )

    # Since we are going to plot, we need to initialize ROOT
    ROOT.gROOT.SetBatch()
    ROOT.gStyle.SetOptStat(1110)
    ROOT.gStyle.SetOptFit(101)

    # Prevent cluttering with ROOT info messages
    if root_error_ignore_level is not None:
        ROOT.gErrorIgnoreLevel = root_error_ignore_level

    # Before we can start plotting, we of course need to collect all
    # ROOT-files that contain data for the plot that we want, e.g. we need to
    # collect all plot ROOT files from the revisions in 'revisions'.
    # The 'reference'-plots, if requested, require special treatment, as they
    # are stored on a different location than the regular plot ROOT files.

    # Collect all plot files, i.e. plot ROOT files from the requested revisions
    if len(revisions) == 0:
        print(
            "No revisions selected for plotting. Returning without "
            "doing anything.",
            file=sys.stderr,
        )
        return

    plot_files = get_plot_files(revisions[1:], work_folder)
    reference_files = get_plot_files(revisions[:1], work_folder)

    # We don't want to have plots that only show the tracked references.
    # Instead we collect all packages that have at least one plot of a new
    # revision in them.
    # Only exception: If 'reference' is the only revision we have, we show it
    # because this is clearly what the user wants
    plot_packages = set()
    only_tracked_reference = set(plot_files.keys()) | set(
        reference_files.keys()
    ) == {"reference"}
    for results in [plot_files, reference_files]:
        for rev in results:
            if rev == "reference" and not only_tracked_reference:
                continue
            for package in results[rev]:
                if results[rev][package]:
                    plot_packages.add(package)

    # The dictionaries {package: {file: {key: [list of root objects]}}}
    plot_p2f2k2o = rootobjects_from_files(
        plot_files, is_reference=False, work_folder=work_folder
    )
    reference_p2f2k2o = rootobjects_from_files(
        reference_files, is_reference=True, work_folder=work_folder
    )

    # Delete all that doesn't belong to a package that we want to plot:
    for package in set(plot_p2f2k2o.keys()) - plot_packages:
        del plot_p2f2k2o[package]
    for package in set(reference_p2f2k2o.keys()) - plot_packages:
        del reference_p2f2k2o[package]

    all_p2f2k2o = merge_nested_list_dicts(plot_p2f2k2o, reference_p2f2k2o)

    # Open the output file
    # First: Create destination directory if it does not yet exist
    comparison_json_file = validationpath.get_html_plots_tag_comparison_json(
        work_folder, revisions
    )
    content_dir = validationpath.get_html_plots_tag_comparison_folder(
        work_folder, revisions
    )

    if not os.path.exists(content_dir):
        os.makedirs(content_dir)

    comparison_packages = []

    # Collect all plotuples for all the files
    all_plotuples = []

    # for every package
    for i, package in enumerate(sorted(list(plot_packages))):

        # Some information to be printed out while the plots are created
        print(
            terminal_title_line(
                f"Creating plots for package: {package}", level=1
            )
        )

        compare_files = []

        # Now we loop over all files that belong to the package to
        # group the plots correctly
        for rootfile in sorted(all_p2f2k2o[package].keys()):
            file_name, file_ext = os.path.splitext(rootfile)

            # Some more information to be printed out while plots are
            # being created
            print(f"Creating plots for file: {rootfile}")

            # A list in which we keep all the plotuples for this file
            plotuples = []

            # report the progress over the queue object, if available
            if process_queue:
                try:
                    process_queue.put_nowait(
                        {
                            "current_package": i,
                            "total_package": len(plot_packages),
                            "status": "running",
                            "package_name": package,
                            "file_name": file_name,
                        }
                    )
                except queue.Full:
                    # message could not be placed, but no problem next message
                    # will maybe work
                    pass

            # Now loop over ALL keys (within a file, objects will be
            # sorted by key)
            compare_plots = []
            compare_ntuples = []
            compare_html_content = []
            has_reference = False

            root_file_meta_data = collections.defaultdict(lambda: None)

            for key in all_p2f2k2o[package][rootfile].keys():
                plotuple = Plotuple(
                    all_p2f2k2o[package][rootfile][key], revisions, work_folder
                )
                plotuple.create_plotuple()
                plotuples.append(plotuple)
                has_reference = plotuple.has_reference()

                if plotuple.type == "TNtuple":
                    compare_ntuples.append(plotuple.create_json_object())
                elif plotuple.type == "TNamed":
                    compare_html_content.append(plotuple.create_json_object())
                elif plotuple.type == "meta":
                    meta_key, meta_value = plotuple.get_meta_information()
                    root_file_meta_data[meta_key] = meta_value
                else:
                    compare_plots.append(plotuple.create_json_object())

            compare_file = json_objects.ComparisonPlotFile(
                title=file_name,
                package=package,
                rootfile=file_name,
                compared_revisions=revisions,
                plots=compare_plots,
                has_reference=has_reference,
                ntuples=compare_ntuples,
                html_content=compare_html_content,
                description=root_file_meta_data["description"],
            )
            compare_files.append(compare_file)

            all_plotuples.extend(plotuples)

        comparison_packages.append(
            json_objects.ComparisonPackage(
                name=package, plotfiles=compare_files
            )
        )
        # Make the command line output more readable
        print()

    print(f"Storing to {comparison_json_file}")

    # create objects for all revisions
    comparison_revs = []

    for i_revision, revision in enumerate(revisions):
        line_color = None
        index = index_from_revision(revision, work_folder)
        if index is not None:
            style = get_style(index)
            line_color = ROOT.gROOT.GetColor(style.GetLineColor()).AsHexString()
        # The reference revision (first entry) is always drawn in black
        if i_revision == 0:
            line_color = "#000000"
        if line_color is None:
            # Fixed message: previously had stray 'f' characters baked into
            # the string ("revision f{revision}") due to misplaced f-string
            # prefixes.
            print(
                f"ERROR: line_color for revision {revision} could not be set!"
                f" Choosing default color {line_color}.",
                file=sys.stderr,
            )
        # print("For {} index {} color {}".format(revision, index, line_color))

        # todo the creation date and git_hash of the original revision should
        # be transferred here
        comparison_revs.append(
            json_objects.ComparisonRevision(label=revision, color=line_color)
        )

    # todo: refactor this information extraction -> json inside a specific
    # class / method after the plots have been created
    json_objects.dump(
        comparison_json_file,
        json_objects.Comparison(comparison_revs, comparison_packages),
    )

    print_plotting_summary(all_plotuples)
491
492
def print_plotting_summary(
    plotuples: List[Plotuple], warning_verbosity=1, chi2_verbosity=1
) -> None:
    """
    Print summary of all plotuples plotted, especially printing information
    about failed comparisons.
    :param plotuples: List of Plotuple objects
    :param warning_verbosity: 0: no information about warnings, 1: write out
        number of warnings per category, 2: report offending scripts
    :param chi2_verbosity: As warning_verbosity but with the results of the
        chi2 comparisons
    :return: None
    """
    print()
    print(terminal_title_line("Summary of plotting", level=0))

    print(f"Total number of plotuples considered: {len(plotuples)}")

    def pt_key(plotuple):
        """ How we report on this plotuple (package/key/rootfile, each
        component truncated to 30 characters). """
        key = plotuple.key
        if len(key) > 30:
            key = key[:30] + "..."
        rf = os.path.basename(plotuple.rootfile)
        if len(rf) > 30:
            rf = rf[:30] + "..."
        return f"{plotuple.package}/{key}/{rf}"

    # Group the plotuples by warning and by chi2 comparison result
    n_warnings = 0
    plotuple_no_warning = []
    plotuple_by_warning = collections.defaultdict(list)
    plotuples_by_comparison_result = collections.defaultdict(list)
    for plotuple in plotuples:
        for warning in plotuple.warnings:
            n_warnings += 1
            plotuple_by_warning[warning].append(pt_key(plotuple))
        if not plotuple.warnings:
            plotuple_no_warning.append(pt_key(plotuple))
        plotuples_by_comparison_result[plotuple.comparison_result].append(
            pt_key(plotuple)
        )

    if warning_verbosity:
        print()
        if n_warnings:
            print(f"A total of {n_warnings} warnings were issued.")
            for warning, perpetrators in plotuple_by_warning.items():
                print(
                    f"* '{warning}' was issued by {len(perpetrators)} "
                    f"plotuples"
                )
                if warning_verbosity >= 2:
                    for perpetrator in perpetrators:
                        print(f"  - {perpetrator}")
        else:
            print("No warnings were issued. ")
        print(
            validationfunctions.congratulator(
                total=len(plotuples), success=len(plotuple_no_warning)
            )
        )
        print()

    if chi2_verbosity:
        if not warning_verbosity:
            print()
        print("Chi2 comparisons")
        for result, perpetrators in plotuples_by_comparison_result.items():
            print(
                f"* '{result}' was the result of {len(perpetrators)} "
                f"comparisons"
            )
            if chi2_verbosity >= 2:
                for perpetrator in perpetrators:
                    print(f"  - {perpetrator}")
        # Weighted score: full credit for 'equal', partial credit for
        # 'not_compared' and 'warning', none for failures
        score = (
            len(plotuples_by_comparison_result["equal"])
            + 0.75 * len(plotuples_by_comparison_result["not_compared"])
            + 0.5 * len(plotuples_by_comparison_result["warning"])
        )
        print(
            validationfunctions.congratulator(
                rate_name="Weighted score: ",
                total=len(plotuples),
                success=score,
            )
        )
        print()
581
582
def rootobjects_from_files(
    root_files_dict: Dict[str, Dict[str, List[str]]],
    is_reference: bool,
    work_folder: str,
) -> Dict[str, Dict[str, Dict[str, List[RootObject]]]]:
    """
    Takes a nested dictionary of root file paths for different revisions
    and returns a (differently!) nested dictionary of root file objects.

    :param root_files_dict: The dict of all *.root files which shall be
        read in and for which the corresponding RootObjects shall be created:
        {revision: {package: [root file]}}
    :param is_reference: Boolean value indicating if the objects are
        reference objects or not.
    :param work_folder:
    :return: {package: {file: {key: [list of root objects]}}}
    """

    # Triply nested defaultdict, so objects can be appended without
    # existence checks: {package: {file basename: {key: [objects]}}}
    grouped = collections.defaultdict(
        lambda: collections.defaultdict(lambda: collections.defaultdict(list))
    )

    # Read every file of every package of every revision and regroup the
    # resulting objects by package / file basename / key
    for revision, files_by_package in root_files_dict.items():
        for package, file_paths in files_by_package.items():
            for file_path in file_paths:
                basename = os.path.basename(file_path)
                key2objects = rootobjects_from_file(
                    file_path, package, revision, is_reference, work_folder
                )
                for key, objects in key2objects.items():
                    grouped[package][basename][key].extend(objects)

    return grouped
619
620
def get_root_object_type(root_object: ROOT.TObject) -> str:
    """
    Get the type of the ROOT object as a string in a way that makes sense to
    us. In particular, "" is returned if we have a ROOT object that is of no
    use to us.
    :param root_object: ROOT TObject
    :return: type as string if the ROOT object
    """
    if root_object.InheritsFrom("TNtuple"):
        return "TNtuple"
    if root_object.InheritsFrom("TH1"):
        # This branch also catches TProfile, as that ROOT class derives
        # from TH1D; TH2 subclasses are reported separately.
        return "TH2" if root_object.InheritsFrom("TH2") else "TH1"
    # TEfficiency barks and quarks like a TProfile, but is unfortunately not
    if root_object.InheritsFrom("TEfficiency"):
        return "TEfficiency"
    if root_object.InheritsFrom("TGraph"):
        return "TGraph"
    if root_object.ClassName() == "TNamed":
        return "TNamed"
    if root_object.InheritsFrom("TASImage"):
        return "TASImage"
    # Anything else is of no use to us
    return ""
649
650
def get_metadata(root_object: ROOT.TObject) -> Dict[str, Any]:
    """ Extract metadata (description, checks etc.) from a ROOT object.
    :param root_object: ROOT TObject
    :return: dict with keys 'description', 'check', 'contact' (strings,
        defaulting to "n/a") and 'metaoptions' (list of strings)
    """
    object_type = get_root_object_type(root_object)

    # Defaults used whenever a piece of metadata is absent
    metadata = {
        "description": "n/a",
        "check": "n/a",
        "contact": "n/a",
        "metaoptions": [],
    }

    # todo [ref, medium]: we should incorporate this in the MetaOptionParser
    # and never pass them around as a list in the first place
    def _split_metaoptions(metaoption_str):
        # "a, b ,c" -> ["a", "b", "c"], dropping empty entries
        return [opt.strip() for opt in metaoption_str.split(",") if opt.strip()]

    if object_type in ("TH1", "TH2", "TEfficiency", "TGraph"):
        # Histogram-like objects carry their metadata as entries in their
        # list of functions (name -> title)
        named = {
            fn.GetName(): fn.GetTitle()
            for fn in root_object.GetListOfFunctions()
        }

        metadata["description"] = named.get("Description", "n/a")
        metadata["check"] = named.get("Check", "n/a")
        metadata["contact"] = named.get("Contact", "n/a")
        metadata["metaoptions"] = _split_metaoptions(
            named.get("MetaOptions", "")
        )

    elif object_type == "TNtuple":
        # N-tuples store their metadata as aliases; only overwrite the
        # defaults if the alias is actually set
        for field, alias in (
            ("description", "Description"),
            ("check", "Check"),
            ("contact", "Contact"),
        ):
            value = root_object.GetAlias(alias)
            if value:
                metadata[field] = value

        metaoptions_str = root_object.GetAlias("MetaOptions")
        if metaoptions_str:
            metadata["metaoptions"] = _split_metaoptions(metaoptions_str)

    # TODO: Can we somehow incorporate TNameds and TASImages?

    return metadata
701
702
def add_rootobject(
    root_object: ROOT.TObject,
    key2object: Dict[str, List[RootObject]],
    name: str,
    revision: str,
    package: str,
    root_file: str,
    dir_date: str,
    is_reference: bool,
) -> None:
    """
    Add a root object with all its metadata to a dictionary

    @param root_object root object that shall be added to the dictionary
    @param key2object dictionary of all root objects
    @param name name of root object
    @param revision revision name
    @param package package
    @param root_file the *.root file for which the corresponding RootObjects shall be created
    @param dir_date time_stamp of the *.root file
    @param is_reference boolean value indicating if the object is a reference object or not.
    """
    object_type = get_root_object_type(root_object)
    # get_root_object_type returns "" for any type that we're not
    # interested in, so there is nothing to add in that case
    if not object_type:
        return

    # Detach histograms from their file, so the data lives on even after
    # the ROOT file is closed
    if root_object.InheritsFrom("TH1"):
        root_object.SetDirectory(0)

    # Retrieve the metadata (description, check, contact, metaoptions)
    metadata = get_metadata(root_object)

    if object_type == "TNtuple":
        # The n-tuple's values are inaccessible once the ROOT file is
        # closed (unlike histograms), so copy the first entry into a plain
        # dict and store that instead of the n-tuple itself. This lets us
        # use the same RootObject()-call for histograms and n-tuples.
        root_object.GetEntry(0)
        leaf_values = {
            leaf.GetName(): leaf.GetValue()
            for leaf in root_object.GetListOfLeaves()
        }
        root_object = leaf_values

    key2object[name].append(
        RootObject(
            revision,
            package,
            root_file,
            name,
            root_object,
            object_type,
            dir_date,
            metadata["description"],
            metadata["check"],
            metadata["contact"],
            metadata["metaoptions"],
            is_reference,
        )
    )
772
773
def rootobjects_from_file(
    root_file: str,
    package: str,
    revision: str,
    is_reference: bool,
    work_folder: str,
) -> Dict[str, List[RootObject]]:
    """
    Takes a root file, loops over its contents and creates the RootObjects
    for it.

    :param root_file: The *.root file which shall be read in and for which the
        corresponding RootObjects shall be created
    :param package:
    :param revision:
    :param work_folder:
    :param is_reference: Boolean value indicating if the object is a
        reference object or not.
    :return: package, {key: [list of root objects]}. Note: The list will
        contain only one root object right now, because package + root file
        basename key uniquely determine it, but later we will merge this list
        with files from other revisions. In case of errors, it returns an
        empty dictionary.
    """

    # Return value: {key: root object}
    key2object = collections.defaultdict(list)

    # Temporary workaround for dbstore files located (wrongly) in the
    # validation results folder: skip them entirely.
    # Fix: this check only depends on the file name, so it is done once up
    # front instead of once per key inside the loop below (where it was
    # loop-invariant).
    if re.search(".*dbstore.*root", root_file):
        return key2object

    # Open the file with ROOT
    # In case of errors, simply return an empty key2object dictionary
    tfile = None
    try:
        tfile = ROOT.TFile(root_file)
        if not tfile or not tfile.IsOpen():
            B2ERROR(f"The file {root_file} can not be opened. Skipping it.")
            return key2object
    except OSError as e:
        B2ERROR(f"{e}. Skipping it.")
        return key2object

    # Get the 'last modified' timestamp of the revision that contains our
    # current root_file
    dir_date = date_from_revision(revision, work_folder)

    # Loop over all Keys in that ROOT-File
    for key in tfile.GetListOfKeys():
        name = key.GetName()

        # Get the ROOT object that belongs to that Key. If there is no
        # object, continue
        root_object = tfile.Get(name)
        if not root_object:
            continue

        # If a TDirectory is saved in the file, we step down into the
        # subdirectory and add its contents (with a prefixed name) instead
        if root_object.InheritsFrom("TDirectory"):
            for sub_key in root_object.GetListOfKeys():
                # Get ROOT object related to key
                sub_root_object = sub_key.ReadObj()
                if not sub_root_object:
                    continue

                add_rootobject(
                    sub_root_object,
                    key2object,
                    f"{name}_{sub_key.GetName()}",
                    revision,
                    package,
                    root_file,
                    dir_date,
                    is_reference,
                )

            continue

        add_rootobject(
            root_object,
            key2object,
            name,
            revision,
            package,
            root_file,
            dir_date,
            is_reference,
        )

    # Close the ROOT file before we open the next one!
    tfile.Close()

    return key2object
869
870
871
874
875
def create_plots(
    revisions=None,
    force=False,
    process_queue: Optional[Queue] = None,
    work_folder=".",
):
    """!
    This function generates the plots and html
    page for the requested revisions.
    By default all available revisions are taken. New plots will only be
    created if they don't exist already for the given set of revisions,
    unless the force option is used.
    @param revisions: The revisions which should be taken into account.
    @param force: If True, plots are created even if there already is a version
        of them (which may be deprecated, though)
    @param process_queue: communication Queue object, which is used in
        multi-processing mode to report the progress of the plot creating.
    @param work_folder: The work folder
    """

    # Initialize the list of revisions which we will plot
    if not revisions:
        revisions = []

    # Keep only valid (i.e. available) revisions. 'reference' needs to be
    # treated separately, because it is always a viable option, but will
    # never be listed in 'available_revisions()'.
    # Fix: the old code called revisions.pop(revision) — but list.pop()
    # takes an index, not a value, so it raised TypeError — and it mutated
    # the list while iterating over it. We build a filtered list instead.
    available = available_revisions(work_folder)
    valid_revisions = []
    for revision in revisions:
        if revision == "reference" or revision in available:
            valid_revisions.append(revision)
        else:
            print(f"Warning: Removing invalid revision '{revision}'.")
    revisions = valid_revisions

    # In case no valid revisions were given, fall back to default and use all
    # available revisions and reference. The order should now be [reference,
    # newest_revision, ..., oldest_revision].
    if not revisions:
        revisions = ["reference"] + available_revisions(work_folder)

    # Now we check whether the plots for the selected revisions have been
    # generated before or not. In the path we use the alphabetical order of
    # the revisions, not the chronological one
    # (easier to work with on the web server side)
    expected_path = validationpath.get_html_plots_tag_comparison_json(
        work_folder, revisions
    )

    # If the path exists and we don't want to force the regeneration of plots,
    # serve what's in the archive
    if os.path.exists(expected_path) and not force:
        print(
            f"Plots for the revision(s) {', '.join(revisions)} have already been created before and will be served " +
            "from the archive."
        )
    # Otherwise: Create the requested plots
    else:
        generate_new_plots(revisions, work_folder, process_queue)

    # signal the main process that the plot creation is complete
    if process_queue:
        process_queue.put({"status": "complete"})
        process_queue.close()
942
# Signatures of the helpers used above (from json_objects,
# validationfunctions and validationpath), kept here for reference:
#   dump(file_name, obj)
#   congratulator(success=None, failure=None, total=None,
#       just_comment=False, rate_name="Success rate") -> str
#   terminal_title_line(title="", subtitle="", level=0) -> str
#   get_html_plots_tag_comparison_json(output_base_dir, tags)
#   get_results_folder(output_base_dir)
#   get_html_plots_tag_comparison_folder(output_base_dir, tags)
#   get_results_tag_folder(output_base_dir, tag)