Belle II Software  release-05-02-19
validationplots.py
1 #!/usr/bin/env python3
2 # -*- coding: utf-8 -*-
3 
4 # Normal imports
5 import glob
6 import os
7 import re
8 import sys
9 import queue
10 from typing import Dict, Any, List, Union, Optional
11 import collections
12 from multiprocessing import Queue
13 
14 # Load ROOT
15 import ROOT
16 # In case some ROOT files loaded by the validation scripts contain some
17 # RooFit objects, ROOT will auto-load RooFit. Due to some (yet not
18 # understood) tear down problem, this results in this error:
19 # Fatal in <TClass::SetUnloaded>: The TClass for map<TString,double> is being
20 # unloaded when in state 3 To prevent this, we are loading RooFit here
21 # before ROOT has a chance to do this
22 from ROOT import RooFit
23 
24 # The pretty printer. Print prettier :)
25 import pprint
26 import json_objects
27 
28 import validationpath
29 from validationplotuple import Plotuple
30 from validationfunctions import index_from_revision, get_style, \
31  available_revisions, terminal_title_line
32 import validationfunctions
33 try:
34  import simplejson as json
35 except ImportError:
36  import json
37 
38 from validationrootobject import RootObject
39 
40 
# Refuse to run without a basf2 setup: at least one of the central release
# directory or the local development directory must be configured.
if os.environ.get('BELLE2_RELEASE_DIR') is None \
        and os.environ.get('BELLE2_LOCAL_DIR') is None:
    sys.exit('Error: No basf2 release set up!')

# Module-wide pretty printer for nicer debug output
pp = pprint.PrettyPrinter(depth=6, indent=1, width=80)
46 
47 
48 
51 
52 
def date_from_revision(revision: str, work_folder: str) -> Optional[Union[int, float]]:
    """
    Takes the name of a revision and returns the 'last modified'-timestamp of
    the corresponding directory, which holds the revision.
    :param revision: A string containing the name of a revision
    :param work_folder: Folder that contains the results/ directory
    :return: The 'last modified'-timestamp of the folder which holds the
        revision, or None if the revision is unknown
    """
    # 'reference' is timeless: there is always exactly one version of it,
    # which is presumed to be the latest
    if revision == 'reference':
        return 0

    # Regular releases and builds have a well defined 'last modified' date:
    # the mtime of their folder inside the results directory
    results_dir = validationpath.get_results_folder(work_folder)
    if revision not in os.listdir(results_dir):
        return None
    return os.path.getmtime(
        validationpath.get_results_tag_folder(work_folder, revision)
    )
77 
78 
def merge_nested_list_dicts(a, b):
    """ Given two nested dictionaries of the same depth whose leaves are
    lists, return a 'merged' dictionary containing the concatenated lists.
    :param a: Dict[Dict[...[Dict[List]]..]]
    :param b: Dict[Dict[...[Dict[List]]..]] (same depth as a)
    :return: merged dictionary
    """

    def _merge(dst, src):
        """ Merge src into dst and return dst. """
        for key, value in src.items():
            if key not in dst:
                dst[key] = value
            elif isinstance(dst[key], dict) and isinstance(value, dict):
                # Same branch on both sides: descend one level
                _merge(dst[key], value)
            else:
                # At the leaves both sides must hold lists
                assert isinstance(dst[key], list)
                assert isinstance(value, list)
                dst[key].extend(value)
        return dst

    return _merge(a.copy(), b.copy())
102 
103 
def get_plot_files(
        revisions: List[str],
        work_folder: str
) -> Dict[str, Dict[str, List[str]]]:
    """
    Collect the plot ROOT files of all requested revisions as absolute
    paths: for every revision, find its results folder and gather the ROOT
    files of each package inside it.
    :param revisions: Name of the revisions.
    :param work_folder: Folder that contains the results/ directory
    :return: plot files as {revision: {package: [root files]}}
    """
    # Accumulator for the ROOT file paths we find
    plot_files = collections.defaultdict(lambda: collections.defaultdict(list))

    results_dir = validationpath.get_results_folder(work_folder)

    for revision in revisions:

        # Reference plots are tracked with the package sources, not in the
        # results folder, so they are handled by a dedicated helper
        if revision == "reference":
            plot_files["reference"] = collections.defaultdict(
                list, get_tracked_reference_files()
            )
            continue

        revision_dir = os.path.join(results_dir, revision)
        if not os.path.isdir(revision_dir):
            continue

        for package in os.listdir(revision_dir):
            package_dir = os.path.join(revision_dir, package)
            # All ROOT files within this package, stored as absolute paths
            plot_files[revision][package].extend(
                os.path.abspath(path)
                for path in glob.glob(package_dir + "/*.root")
            )

    return plot_files
149 
150 
def get_tracked_reference_files() -> Dict[str, List[str]]:
    """
    Loop over the local and central release directories and collect the
    .root files from the validation subfolders of the packages. These are
    the files we use as references.
    From the central release directory, we collect the files of the release
    which is set up on the machine running this script. Local files take
    precedence: a central reference file is dropped when a local file with
    the same package-relative path exists.
    :return: ROOT files that are located in the same folder as the steering
        files of the package, as {package: [list of root files]}
    """

    # The base paths to the local and central release directories
    basepaths = {
        'local': os.environ.get('BELLE2_LOCAL_DIR', None),
        'central': os.environ.get('BELLE2_RELEASE_DIR', None),
    }

    # This is where we store the paths of reference ROOT files we've found
    results = {
        'local': collections.defaultdict(list),
        'central': collections.defaultdict(list)
    }

    # Folder names used by the packages to keep the validation reference
    # plots (and the quick-test folder of the validation package)
    validation_folder_name = 'validation'
    validation_test_folder_name = 'validation-test'

    # Now collect both local and central ROOT files:
    for location in ['local', 'central']:

        # Skip locations that are not set up (e.g. the central release dir
        # might be missing when working with a completely local version)
        if basepaths[location] is None:
            continue

        root = basepaths[location]

        for package in os.listdir(root):
            # Search the package's validation folder and list all ROOT
            # files within, stored as absolute paths
            glob_search = os.path.join(
                root, package, validation_folder_name, "*.root"
            )
            results[location][package].extend([
                os.path.abspath(f) for f in glob.glob(glob_search)
                if os.path.isfile(f)
            ])
            # Special case: the validation-test folder in the validation
            # package, which is used as a quick test of this framework
            if package == "validation":
                glob_search = os.path.join(
                    root,
                    package,
                    validation_test_folder_name,
                    "*.root"
                )
                results[location][validation_test_folder_name].extend([
                    os.path.abspath(f) for f in glob.glob(glob_search)
                    if os.path.isfile(f)
                ])

    # Get rid of duplicates: since local > central, delete every central
    # reference file that has a local counterpart.
    for package, local_files in results['local'].items():
        # Package-relative paths ("/[package]/[filename]") of the local files
        local_relative = {
            local_file.replace(basepaths['local'], '')
            for local_file in local_files
        }
        # Rebuild the central list instead of calling remove() while
        # iterating over it, which silently skips elements
        results['central'][package] = [
            central_file
            for central_file in results['central'][package]
            if central_file.replace(basepaths['central'], '')
            not in local_relative
        ]

    # Return local and central reference files merged per package. The
    # local/central distinction is dropped: we stored absolute paths and
    # both kinds are treated the same downstream.
    # Bugfix: the key list previously used results['central'] twice, so
    # packages with only local reference files were silently dropped.
    return {
        package:
            results['central'][package] + results['local'][package]
        for package in
        set(results['central']) | set(results['local'])
    }
245 
246 
def generate_new_plots(
        revisions: List[str],
        work_folder: str,
        process_queue: Optional[Queue] = None,
        root_error_ignore_level=ROOT.kWarning
) -> None:
    """
    Creates the plots that contain the requested revisions. Each plot (or
    n-tuple, for that matter) is stored in an object of class Plotuple.
    @param revisions: Names of the revisions to plot. The first entry is
        treated as the reference-like revision (see below).
    @param work_folder: Folder containing results
    @param process_queue: communication queue object, which is used in
        multi-processing mode to report the progress of the plot creating.
    @param root_error_ignore_level: Value for gErrorIgnoreLevel. Default:
        ROOT.kWarning. If set to None, global level will be left unchanged.
    @return: No return value
    """

    print(terminal_title_line(
        "Creating plots for the revision(s) " + ", ".join(revisions) + "."
    ))

    # Since we are going to plot, we need to initialize ROOT
    ROOT.gROOT.SetBatch()
    ROOT.gStyle.SetOptStat(1110)
    ROOT.gStyle.SetOptFit(101)

    # Prevent cluttering with ROOT info messages
    if root_error_ignore_level is not None:
        ROOT.gErrorIgnoreLevel = root_error_ignore_level

    # Before we can start plotting, we of course need to collect all
    # ROOT files that contain data for the plots we want, i.e. all plot
    # ROOT files from the revisions in 'revisions'. The 'reference' plots,
    # if requested, require special treatment, as they are stored in a
    # different location than the regular plot ROOT files.
    if len(revisions) == 0:
        print("No revisions selected for plotting. Returning without "
              "doing anything.", file=sys.stderr)
        return

    plot_files = get_plot_files(revisions[1:], work_folder)
    reference_files = get_plot_files(revisions[:1], work_folder)

    # We don't want to have plots that only show the tracked references.
    # Instead we collect all packages that have at least one plot of a new
    # revision in them.
    # Only exception: If 'reference' is the only revision we have, we show
    # it because this is clearly what the user wants.
    plot_packages = set()
    only_tracked_reference = \
        set(plot_files.keys()) | set(reference_files.keys()) == {"reference"}
    for results in [plot_files, reference_files]:
        for rev in results:
            if rev == "reference" and not only_tracked_reference:
                continue
            for package in results[rev]:
                if results[rev][package]:
                    plot_packages.add(package)

    # The dictionaries {package: {file: {key: [list of root objects]}}}
    plot_p2f2k2o = rootobjects_from_files(
        plot_files,
        is_reference=False,
        work_folder=work_folder
    )
    reference_p2f2k2o = rootobjects_from_files(
        reference_files,
        is_reference=True,
        work_folder=work_folder
    )

    # Delete all that doesn't belong to a package that we want to plot:
    for package in set(plot_p2f2k2o.keys()) - plot_packages:
        del plot_p2f2k2o[package]
    for package in set(reference_p2f2k2o.keys()) - plot_packages:
        del reference_p2f2k2o[package]

    all_p2f2k2o = merge_nested_list_dicts(plot_p2f2k2o, reference_p2f2k2o)

    # Work out where the output goes and create the destination directory
    # if it does not yet exist
    content_dir = validationpath.get_html_plots_tag_comparison_folder(
        work_folder,
        revisions
    )
    comparison_json_file = validationpath.get_html_plots_tag_comparison_json(
        work_folder,
        revisions
    )

    if not os.path.exists(content_dir):
        os.makedirs(content_dir)

    comparison_packages = []

    # Collect all plotuples for all the files
    all_plotuples = []

    # for every package
    for i, package in enumerate(sorted(list(plot_packages))):

        # Some information to be printed out while the plots are created
        print(terminal_title_line(
            f'Creating plots for package: {package}',
            level=1
        ))

        compare_files = []

        # Now we loop over all files that belong to the package to
        # group the plots correctly
        for rootfile in sorted(all_p2f2k2o[package].keys()):
            file_name = os.path.splitext(rootfile)[0]

            # Some more information to be printed out while plots are
            # being created
            print(f'Creating plots for file: {rootfile}')

            # A list in which we keep all the plotuples for this file
            plotuples = []

            # report the progress over the queue object, if available
            if process_queue:
                try:
                    process_queue.put_nowait(
                        {
                            "current_package": i,
                            "total_package": len(plot_packages),
                            "status": "running",
                            "package_name": package,
                            "file_name": file_name
                        }
                    )
                except queue.Full:
                    # message could not be placed, but no problem: the next
                    # message will maybe work
                    pass

            # Now loop over ALL keys (within a file, objects will be
            # sorted by key)
            compare_plots = []
            compare_ntuples = []
            compare_html_content = []
            has_reference = False

            root_file_meta_data = collections.defaultdict(lambda: None)

            for key in all_p2f2k2o[package][rootfile].keys():
                plotuple = Plotuple(
                    all_p2f2k2o[package][rootfile][key],
                    revisions,
                    work_folder
                )
                plotuple.create_plotuple()
                plotuples.append(plotuple)
                # NOTE(review): this records only the LAST plotuple's
                # reference status for the whole file — confirm intended
                has_reference = plotuple.has_reference()

                if plotuple.type == 'TNtuple':
                    compare_ntuples.append(plotuple.create_json_object())
                elif plotuple.type == 'TNamed':
                    compare_html_content.append(plotuple.create_json_object())
                elif plotuple.type == "meta":
                    meta_key, meta_value = plotuple.get_meta_information()
                    root_file_meta_data[meta_key] = meta_value
                else:
                    compare_plots.append(plotuple.create_json_object())

            compare_file = json_objects.ComparisonPlotFile(
                title=file_name,
                package=package,
                rootfile=file_name,
                compared_revisions=revisions,
                plots=compare_plots,
                has_reference=has_reference,
                ntuples=compare_ntuples,
                html_content=compare_html_content,
                description=root_file_meta_data["description"]
            )
            compare_files.append(compare_file)

            all_plotuples.extend(plotuples)

        comparison_packages.append(
            json_objects.ComparisonPackage(
                name=package,
                plotfiles=compare_files)
        )
        # Make the command line output more readable
        print()

    print(f"Storing to {comparison_json_file}")

    # create objects for all revisions
    comparison_revs = []

    for i_revision, revision in enumerate(revisions):
        line_color = None
        index = index_from_revision(revision, work_folder)
        if index is not None:
            style = get_style(index)
            line_color = ROOT.gROOT.GetColor(style.GetLineColor()).AsHexString()
        # The first revision (the reference-like one) is always black
        if i_revision == 0:
            line_color = "#000000"
        if line_color is None:
            # Fixed stray 'f' characters that used to garble this message
            print(
                f"ERROR: line_color for revision {revision} could not be set!"
                f" Choosing default color {line_color}.",
                file=sys.stderr
            )

        # todo the creation date and git_hash of the original revision should
        # be transferred here
        comparison_revs.append(json_objects.ComparisonRevision(
            label=revision,
            color=line_color)
        )

    # todo: refactor this information extraction -> json inside a specific
    # class / method after the plots have been created
    json_objects.dump(
        comparison_json_file,
        json_objects.Comparison(comparison_revs, comparison_packages)
    )

    print_plotting_summary(all_plotuples)
476 
477 
def print_plotting_summary(
        plotuples: List[Plotuple],
        warning_verbosity=1,
        chi2_verbosity=1
) -> None:
    """
    Print summary of all plotuples plotted, especially printing information
    about failed comparisons.
    :param plotuples: List of Plotuple objects
    :param warning_verbosity: 0: no information about warnings, 1: write out
        number of warnings per category, 2: report offending scripts
    :param chi2_verbosity: As warning_verbosity but with the results of the
        chi2 comparisons
    :return: None
    """
    print()
    print(terminal_title_line(
        "Summary of plotting",
        level=0
    ))

    print("Total number of plotuples considered: {}".format(len(plotuples)))

    def pt_key(plotuple):
        """ How we report on this plotuple: package/key/rootfile with long
        components truncated to 30 characters. """
        key = plotuple.key
        if len(key) > 30:
            key = key[:30] + "..."
        rf = os.path.basename(plotuple.rootfile)
        if len(rf) > 30:
            rf = rf[:30] + "..."
        return f"{plotuple.package}/{key}/{rf}"

    # Group the plotuples by warning and by chi2 comparison result
    n_warnings = 0
    plotuple_no_warning = []
    plotuple_by_warning = collections.defaultdict(list)
    plotuples_by_comparison_result = collections.defaultdict(list)
    for plotuple in plotuples:
        for warning in plotuple.warnings:
            n_warnings += 1
            plotuple_by_warning[warning].append(pt_key(plotuple))
        if not plotuple.warnings:
            plotuple_no_warning.append(pt_key(plotuple))
        plotuples_by_comparison_result[plotuple.comparison_result].append(
            pt_key(plotuple)
        )

    if warning_verbosity:
        print()
        if n_warnings:
            print(f"A total of {n_warnings} warnings were issued.")
            for warning, perpetrators in plotuple_by_warning.items():
                print(f"* '{warning}' was issued by {len(perpetrators)} "
                      f"plotuples")
                if warning_verbosity >= 2:
                    for perpetrator in perpetrators:
                        print(f"  - {perpetrator}")
        else:
            print("No warnings were issued. ")
        # Summarize the warning-free fraction as a success rate
        print(validationfunctions.congratulator(
            total=len(plotuples),
            success=len(plotuple_no_warning)
        ))
        print()

    if chi2_verbosity:
        if not warning_verbosity:
            print()
        print("Chi2 comparisons")
        for result, perpetrators in plotuples_by_comparison_result.items():
            print(f"* '{result}' was the result of {len(perpetrators)} "
                  f"comparisons")
            if chi2_verbosity >= 2:
                for perpetrator in perpetrators:
                    print(f"  - {perpetrator}")
        # Weighted score: 'equal' counts fully, 'not_compared' 3/4,
        # 'warning' 1/2; everything else (e.g. 'error') counts zero
        score = len(plotuples_by_comparison_result["equal"]) + \
            0.75 * len(plotuples_by_comparison_result["not_compared"]) + \
            0.5 * len(plotuples_by_comparison_result["warning"])
        print(validationfunctions.congratulator(
            rate_name="Weighted score: ",
            total=len(plotuples),
            success=score,
        ))
        print()
562 
563 
def rootobjects_from_files(
        root_files_dict: Dict[str, Dict[str, List[str]]],
        is_reference: bool,
        work_folder: str
) -> Dict[str, Dict[str, Dict[str, List[RootObject]]]]:
    """
    Takes a nested dictionary of root file paths for different revisions
    and returns a (differently!) nested dictionary of root file objects.

    :param root_files_dict: The dict of all *.root files which shall be
        read in and for which the corresponding RootObjects shall be created:
        {revision: {package: [root file]}}
    :param is_reference: Boolean value indicating if the objects are
        reference objects or not.
    :param work_folder:
    :return: {package: {file: {key: [list of root objects]}}}
    """

    # Regrouped result: {package: {file basename: {key: [objects]}}}
    grouped = collections.defaultdict(
        lambda: collections.defaultdict(
            lambda: collections.defaultdict(list)
        )
    )

    # Read every given file and merge its objects into the new grouping
    for revision, files_by_package in root_files_dict.items():
        for package, file_paths in files_by_package.items():
            for path in file_paths:
                basename = os.path.basename(path)
                key2objects = rootobjects_from_file(
                    path,
                    package,
                    revision,
                    is_reference,
                    work_folder
                )
                for key, objects in key2objects.items():
                    grouped[package][basename][key].extend(objects)

    return grouped
604 
605 
def get_root_object_type(root_object: ROOT.TObject) -> str:
    """
    Get the type of the ROOT object as a string in a way that makes sense
    to us. In particular, "" is returned if we have a ROOT object that is
    of no use to us.
    :param root_object: ROOT TObject
    :return: type as string if the ROOT object
    """
    if root_object.InheritsFrom('TNtuple'):
        return 'TNtuple'
    # TProfile also lands in this branch, as it derives from TH1D; a TH2
    # is reported as such even though it inherits from TH1 as well
    if root_object.InheritsFrom('TH1'):
        return 'TH2' if root_object.InheritsFrom('TH2') else 'TH1'
    # TEfficiency barks and quarks like a TProfile, but is unfortunately not
    if root_object.InheritsFrom('TEfficiency'):
        return 'TEfficiency'
    if root_object.InheritsFrom('TGraph'):
        return 'TGraph'
    if root_object.ClassName() == 'TNamed':
        return 'TNamed'
    if root_object.InheritsFrom('TASImage'):
        return 'TASImage'
    # Anything else is of no use to us
    return ""
634 
635 
def get_metadata(root_object: ROOT.TObject) -> Dict[str, Any]:
    """ Extract metadata (description, check, contact, metaoptions) from a
    ROOT object.
    :param root_object ROOT TObject
    """
    object_type = get_root_object_type(root_object)

    # Defaults used whenever a piece of metadata is absent
    metadata = {
        "description": "n/a",
        "check": "n/a",
        "contact": "n/a",
        "metaoptions": []
    }

    # todo [ref, medium]: we should incorporate this in the MetaOptionParser and
    # never pass them around as a list in the first place
    def metaoption_str_to_list(metaoption_str):
        return [
            part.strip() for part in metaoption_str.split(',') if part.strip()
        ]

    if object_type in ['TH1', 'TH2', 'TEfficiency', 'TGraph']:
        # Histogram-like objects carry their metadata in the list of
        # functions as name/title pairs
        named = {
            entry.GetName(): entry.GetTitle()
            for entry in root_object.GetListOfFunctions()
        }

        metadata["description"] = named.get("Description", "n/a")
        metadata["check"] = named.get("Check", "n/a")
        metadata["contact"] = named.get("Contact", "n/a")
        metadata["metaoptions"] = metaoption_str_to_list(
            named.get("MetaOptions", "")
        )

    elif object_type == 'TNtuple':
        # N-tuples store their metadata as aliases; only overwrite the
        # defaults when an alias is actually set
        for field, alias in (("description", "Description"),
                             ("check", "Check"),
                             ("contact", "Contact")):
            value = root_object.GetAlias(alias)
            if value:
                metadata[field] = value

        metaoptions_str = root_object.GetAlias('MetaOptions')
        if metaoptions_str:
            metadata["metaoptions"] = metaoption_str_to_list(metaoptions_str)

    # TODO: Can we somehow incorporate TNameds and TASImages?

    return metadata
689 
690 
def rootobjects_from_file(
        root_file: str,
        package: str,
        revision: str,
        is_reference: bool,
        work_folder: str,
) -> Dict[str, List[RootObject]]:
    """
    Takes a root file, loops over its contents and creates the RootObjects
    for it.

    :param root_file: The *.root file which shall be read in and for which the
        corresponding RootObjects shall be created
    :param package: Name of the package this file belongs to
    :param revision: Name of the revision this file belongs to
    :param work_folder: Folder that contains the results/ directory
    :param is_reference: Boolean value indicating if the object is a
        reference object or not.
    :return: package, {key: [list of root objects]}. Note: The list will
        contain only one root object right now, because package + root file
        basename key uniquely determine it, but later we will merge this list
        with files from other revisions.
    """

    # Return value: {key: root object}
    key2object = collections.defaultdict(list)

    # Get the 'last modified' timestamp of the revision that contains our
    # current root_file
    dir_date = date_from_revision(revision, work_folder)

    # Open the file with ROOT
    tfile = ROOT.TFile(root_file)

    # Loop over all Keys in that ROOT-File
    for key in tfile.GetListOfKeys():
        name = key.GetName()

        # temporary workaround for dbstore files located (wrongly)
        # in the validation results folder
        # NOTE(review): this check only depends on root_file, not on the
        # loop variable, so it could be hoisted before the loop
        if re.search(".*dbstore.*root", root_file):
            continue

        # Get the ROOT object that belongs to that Key. If there is no
        # object, continue
        root_object = tfile.Get(name)
        if not root_object:
            continue

        root_object_type = get_root_object_type(root_object)
        if not root_object_type:
            # get_root_object_type returns "" for any type that we're not
            # interested in
            continue

        # Ensure that the data read from the ROOT files lives on even
        # after the ROOT file is closed (must happen before tfile.Close())
        if root_object.InheritsFrom("TH1"):
            root_object.SetDirectory(0)

        metadata = get_metadata(root_object)

        if root_object_type == "TNtuple":
            # Go to first entry in the n-tuple
            root_object.GetEntry(0)

            # Storage for the values of the n-tuple. We use a dictionary,
            # because we can't access the n-tuple's values anymore after
            # closing the ROOT file (<=> histograms)
            ntuple_values = {}
            for leaf in root_object.GetListOfLeaves():
                ntuple_values[leaf.GetName()] = leaf.GetValue()

            # Overwrite 'root_object' with the dictionary that contains the
            # values, because the values are what we want to save, and we
            # want to use the same RootObject()-call for both histograms and
            # n-tuples :-)
            root_object = ntuple_values

        key2object[name].append(
            RootObject(
                revision,
                package,
                root_file,
                name,
                root_object,
                root_object_type,
                dir_date,
                metadata["description"],
                metadata["check"],
                metadata["contact"],
                metadata["metaoptions"],
                is_reference
            )
        )

    # Close the ROOT file before we open the next one!
    tfile.Close()

    return key2object
791 
792 
793 
796 
797 
def create_plots(
        revisions=None,
        force=False,
        process_queue: Optional[Queue] = None,
        work_folder="."
):
    """!
    This function generates the plots and html
    page for the requested revisions.
    By default all available revisions are taken. New plots will only be
    created if they don't exist already for the given set of revisions,
    unless the force option is used.
    @param revisions: The revisions which should be taken into account.
    @param force: If True, plots are created even if there already is a version
        of them (which may be deprecated, though)
    @param process_queue: communication Queue object, which is used in
        multi-processing mode to report the progress of the plot creating.
    @param process_queue: communication Queue object
    @param work_folder: The work folder
    """

    # Initialize the list of revisions which we will plot
    if not revisions:
        revisions = []

    # Keep only valid (i.e. available) revisions. 'reference' is treated
    # separately because it is always a viable option but never listed in
    # available_revisions(). We build a filtered copy instead of mutating
    # the list while iterating over it (the previous code also called
    # list.pop(revision), which takes an index, not a value, and would
    # have raised a TypeError).
    available = available_revisions(work_folder)
    valid_revisions = []
    for revision in revisions:
        if revision == 'reference' or revision in available:
            valid_revisions.append(revision)
        else:
            print(f"Warning: Removing invalid revision '{revision}'.")
    revisions = valid_revisions

    # In case no valid revisions were given, fall back to default and use all
    # available revisions and reference. The order should now be [reference,
    # newest_revision, ..., oldest_revision].
    if not revisions:
        revisions = ['reference'] + available_revisions(work_folder)

    # Now we check whether the plots for the selected revisions have been
    # generated before or not. In the path we use the alphabetical order of
    # the revisions, not the chronological one
    # (easier to work with on the web server side)
    expected_path = validationpath.get_html_plots_tag_comparison_folder(
        work_folder,
        revisions
    )

    # If the path exists and we don't want to force the regeneration of plots,
    # serve what's in the archive
    if os.path.exists(expected_path) and not force:
        print(
            "Plots for the revision(s) {} have already been created before "
            "and will be served from the archive.".format(
                ", ".join(revisions))
        )
    # Otherwise: Create the requested plots
    else:
        generate_new_plots(revisions, work_folder, process_queue)

    # signal the main process that the plot creation is complete
    if process_queue:
        process_queue.put({"status": "complete"})
        process_queue.close()
validationfunctions.congratulator
str congratulator(Optional[Union[int, float]] success=None, Optional[Union[int, float]] failure=None, Optional[Union[int, float]] total=None, just_comment=False, rate_name="Success rate")
Definition: validationfunctions.py:470
json_objects.Comparison
Definition: json_objects.py:521
json_objects.ComparisonPlotFile
Definition: json_objects.py:266
json_objects.ComparisonPackage
Definition: json_objects.py:462
validationpath.get_results_folder
def get_results_folder(output_base_dir)
Return the absolute path to the results folder.
Definition: validationpath.py:70
json_objects.ComparisonRevision
Definition: json_objects.py:502
validationpath.get_html_plots_tag_comparison_json
def get_html_plots_tag_comparison_json(output_base_dir, tags)
Return the absolute path json file with the comparison file.
Definition: validationpath.py:108
validationfunctions.terminal_title_line
str terminal_title_line(title="", subtitle="", level=0)
Definition: validationfunctions.py:555
validationpath.get_html_plots_tag_comparison_folder
def get_html_plots_tag_comparison_folder(output_base_dir, tags)
Return the absolute path to the results folder.
Definition: validationpath.py:91
validationpath.get_results_tag_folder
def get_results_tag_folder(output_base_dir, tag)
Return the absolute path to the results folder for one specific tag.
Definition: validationpath.py:118
json_objects.dump
def dump(file_name, obj)
Definition: json_objects.py:554