Define datatypes for later serialization by JSON

Base object for all json-serializable objects of the validation suite

Contains information about a specific revision

def __init__(self, label, git_hash=None, creation_date=None, packages=None, creation_timezone=None):
Create a new Revision object and fill all members

self.packages = [] if (packages is None) else packages
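
A minimal usage sketch (the label, hash, and date values are invented for illustration, and it is assumed that the constructor simply stores its arguments as attributes):

import datetime

rev = Revision(
    label="nightly",                        # invented label
    git_hash="abc1234",                     # invented commit hash
    creation_date=datetime.datetime.now(),  # stored as a datetime object
)
assert rev.packages == []  # packages=None was normalized to an empty list

The `[] if (packages is None) else packages` idiom is what makes the default safe: a literal `packages=[]` default argument would be shared between all Revision instances.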

Container for a list of revisions

def __init__(self, revisions):
Create a new Revisions object and fill all members

Contains information about a script and its execution output

def __init__(self, name, path, status, log_url=None, return_code=None, input=None, output=None):
Create a new Script object and fill all members

Wrapper for a file containing a set of plots; only root files are supported so far

def __init__(self, package, title, rootfile, plots, description=""):
Create a new PlotFile object and fill all members

Wrapper for one specific plot.

def __init__(self, is_expert=False, description=None, check=None, contact=None, width=None, height=None, issue=None):
Create a new Plot object and fill all members

Wrapper for NTuple lists. This is not a graphical plot, but a list of values.

def __init__(self, is_expert=False, description=None, check=None):
Create a new NTuple object and fill all members

Wrapper for user HTML content. This is not a graphical plot but HTML
code which will be directly output on the validation website.

def __init__(self, is_expert=False, description=None, check=None):
Create a new HTML content object and fill all members

One high-level package of the validation suite which contains a set of
scripts and output plot files

def __init__(self, name, plotfiles=None, scriptfiles=None, fail_count=0):
Create a new Package object and fill all members

Enum to classify the comparison result of two validation plots

NotCompared = "not_compared"
NotSupported = "not_supported"
FailureTechnical = "technical_failure"
FailureStastical = "statistical_failure"

Contains the comparison result of two plots

def __init__(self, state, chi2):
Create a new ComparisonResult object and fill all members

Contains information about a file containing plots and the comparisons which
have been performed for the content of this file

def __init__(self, package, title, rootfile, compared_revisions=None, plots=None, has_reference=False, ntuples=None, html_content=None, description=None):
Create a new ComparisonPlotFile object and fill all members

super().__init__(package, title, rootfile, plots, description=description)

# the number of failed comparisons in this file
self.comparison_error = len(
    [plt for plt in self.plots if plt.comparison_result == "error"]
)
# the number of failed comparisons of shifter plots in this file
self.comparison_error_shifter = len(
    [plt for plt in self.plots
     if (not plt.is_expert) and plt.comparison_result == "error"]
)
# the number of comparisons which resulted in a warning
self.comparison_warning = len(
    [plt for plt in self.plots if plt.comparison_result == "warning"]
)
# the number of comparisons of shifter plots which resulted in a warning
self.comparison_warning_shifter = len(
    [plt for plt in self.plots
     if (not plt.is_expert) and plt.comparison_result == "warning"]
)
# number of shifter ntuples
self.n_shifter_ntuples = sum([not tuple.is_expert for tuple in self.ntuples])
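
A concrete illustration of these counters (titles and file names are invented, and it is assumed that the constructor, like Revision above, normalizes plots=None and ntuples=None to empty lists):

plots = [
    ComparisonPlot("pt_resolution", comparison_result="error", is_expert=True),
    ComparisonPlot("efficiency", comparison_result="error", is_expert=False),
    ComparisonPlot("fake_rate", comparison_result="warning", is_expert=False),
]
pf = ComparisonPlotFile("tracking", "Tracking plots", "tracking.root", plots=plots)
assert pf.comparison_error == 2          # expert and shifter failures alike
assert pf.comparison_error_shifter == 1  # only the non-expert failure counts
assert pf.comparison_warning_shifter == 1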

One individual plot including its comparison outcome.

def __init__(self, title, comparison_result=None, png_filename=None, pdf_filename=None, contact=None, description=None, check=None, is_expert=None, plot_path=None, comparison_text=None, height=None, width=None, warnings=None):
Create a new ComparisonPlot object and fill all members

super().__init__(is_expert=is_expert, description=description, check=check, contact=contact, width=width, height=height)

Comparison outcome for NTuples

def __init__(self, title, contact=None, description=None, check=None, is_expert=None, json_file_path=None):
Create a new ComparisonNTuple object and fill all members

super().__init__(is_expert=is_expert, description=description, check=check)

Compiled HTML content

def __init__(self, title, contact=None, description=None, check=None, is_expert=None, html_content=None):
Create a new compiled HTML content object and fill all members

super().__init__(is_expert=is_expert, description=description, check=check)

Information about a Package which was used in a comparison operation

def __init__(self, name, plotfiles=None, scriptfiles=None, ntuplefiles=None):
Create a new ComparisonPackage object and fill all members

super().__init__(name, plotfiles=plotfiles, scriptfiles=scriptfiles)

# the number of failed comparisons in this package
self.comparison_error = sum([pf.comparison_error for pf in plotfiles])
# the number of failed comparisons of shifter plots in this package
self.comparison_error_shifter = sum([pf.comparison_error_shifter for pf in plotfiles])
# the number of comparisons which resulted in a warning
self.comparison_warning = sum([pf.comparison_warning for pf in plotfiles])
# the number of comparisons of shifter plots which resulted in a warning
self.comparison_warning_shifter = sum([pf.comparison_warning_shifter for pf in plotfiles])

Revision information enriched by the information gained during the comparison

def __init__(self, label, git_hash=None, creation_date=None, color=None):
Create a new ComparisonRevision object and fill all members

super().__init__(label, git_hash=git_hash, creation_date=None)

Contains information and plots generated for comparisons

def __init__(self, revisions=None, packages=None):
Create a new Comparison object and fill all members

sorted_revs = sorted(revisions, key=lambda x: x.label)
self.label = functools.reduce(lambda x, y: x + "_" + y.label, sorted_revs, "")

def dump(file_name, obj):
Output a tree of objects into a json file

with open(file_name, "w+") as f:
    json.dump(dump_rec(obj), f, sort_keys=True, indent=4)
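
A hypothetical call, reusing the classes from above (the file name is arbitrary):

rev = Revision(label="nightly")
dump("revisions.json", Revisions([rev]))
# revisions.json now holds the nested, sorted, indented dictionaries
# produced by dump_rec below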

def dumps(obj):
Convert a tree of python objects into a json string

return json.dumps(dump_rec(obj), sort_keys=True, indent=4)

def dump_rec(top_object):
Recursive generation of a dictionary from a tree of JsonBase objects

this_dict = {}
for (k, v) in top_object.__dict__.items():
    if isinstance(v, list):
        obj_list = []
        for it in v:
            # only JsonBase objects need a recursive call
            if isinstance(it, JsonBase):
                obj_list.append(dump_rec(it))
            else:
                obj_list.append(it)
        this_dict[k] = obj_list
    elif isinstance(v, JsonBase):
        this_dict[k] = dump_rec(v)
    elif isinstance(v, enum.Enum):
        # enums are serialized via their string value
        this_dict[k] = v.value
    else:
        # plain python value, handled by the default json serializer
        this_dict[k] = v
return this_dict
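
To see the dispatch in action, here is a throwaway sketch; the _Demo class and its attribute values are invented, and the enum is again assumed to be named ComparisonState:

class _Demo(JsonBase):
    def __init__(self):
        self.name = "demo"                            # plain value, kept as-is
        self.revisions = [Revision(label="nightly")]  # list of JsonBase -> recursed into
        self.state = ComparisonState.NotCompared      # enum -> stored as "not_compared"

print(dumps(_Demo()))
# prints a sorted, indented json document in which the Revision
# has been expanded into a nested dictionary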

def __init__(self, title, contact=None, description=None, check=None, is_expert=None, html_content=None)

title
Text used as title for this HTML content item.
contact
name of the contact person
html_content
the user's HTML content

def __init__(self, title, contact=None, description=None, check=None, is_expert=None, json_file_path=None)

title
Text used as title for the ntuple item.
contact
name of the contact person
json_file_path
path to the json file which contains the individual numbers of the ntuple (must be relative to the html directory)

def __init__(self, name, plotfiles=None, scriptfiles=None, ntuplefiles=None)

comparison_error
the number of failed comparisons in this package
comparison_error_shifter
the number of failed comparisons of shifter plots in this package
comparison_warning
the number of comparisons which resulted in a warning
comparison_warning_shifter
the number of comparisons of shifter plots which resulted in a warning

def __init__(self, package, title, rootfile, compared_revisions=None, plots=None, has_reference=False, ntuples=None, html_content=None, description=None)

compared_revision
label of the revisions which were used in this comparison
has_reference
true if a reference file is available for this plot file
ntuples
the ntuples which were compared
html_content
user's html content
comparison_error
the number of failed comparisons in this file
comparison_error_shifter
the number of failed comparisons of shifter plots in this file
comparison_warning
the number of comparisons which resulted in a warning
comparison_warning_shifter
the number of comparisons of shifter plots in this file which resulted in a warning
n_shifter_plots
Number of shifter plots.
n_shifter_ntuples
Number of shifter ntuples.
show_shifter
Show to shifter, i.e. true if this file contains at least one non-expert plot or ntuple

def __init__(self, title, comparison_result=None, png_filename=None, pdf_filename=None, contact=None, description=None, check=None, is_expert=None, plot_path=None, comparison_text=None, height=None, width=None, warnings=None)

title
title used to display this plot
comparison_result
text string for the comparison outcome
comparison_text
verbose text describing the outcome of the comparison
png_filename
the filename of the png file plotted with the comparison graphs
pdf_filename
the filename of the pdf file plotted with the comparison graphs
plot_path
path where the png and pdf files are located (relative to the html directory; has to end with a trailing slash)
warnings
Warnings ("no contact" person etc.)

def __init__(self, state, chi2)

state
a string containing a description of the comparison's outcome
chi2
the chi2 value computed during the comparison

def __init__(self, label, git_hash=None, creation_date=None, color=None)

color
the color which was used for this revision in the comparison plots

def __init__(self, revisions=None, packages=None)

label
the unique label of this comparison
revisions
the list of revisions used in this comparison
packages
the list of packages looked at in this comparison

def __init__(self, is_expert=False, description=None, check=None)

description
telling description for this HTML code
is_expert
true if this is marked as an expert-only HTML code
check
what should be checked for in this HTML code

def __init__(self, is_expert=False, description=None, check=None)

description
telling description for this ntuple
is_expert
true if this is marked as an expert-only ntuple list
check
what should be checked for in this ntuple?

def __init__(self, name, plotfiles=None, scriptfiles=None, fail_count=0)

visible
true if this package is displayed on the default validation website
plotfiles
list of plotfiles which were produced by the scripts in this package
scriptfiles
scripts which were run or skipped as this package was executed
fail_count
the number of scripts which failed to execute with an error

def __init__(self, package, title, rootfile, plots, description="")

package
name of the package which created this file
title
Display name of this file.
rootfile
filename of the root file
description
Description of plot file.
plots
list of plots which are contained inside this plot file
n_shifter_plots
Number of shifter plots.

def __init__(self, is_expert=False, description=None, check=None, contact=None, width=None, height=None, issue=None)

description
telling description for this plot
contact
who is the contact person for this plot?
check
what should be checked for in this plot?
is_expert
true if this is marked as an expert-only plot
width
width of the plot in pixels
height
height of the plot in pixels

def __init__(self, label, git_hash=None, creation_date=None, packages=None, creation_timezone=None)

label
label (or tag) used to display this revision
git_hash
the git commit hash which was at HEAD while the validation scripts were executed
creation_date
date when the validation output of this revision was created, as a datetime object
creation_timezone
timezone used by the creation date
packages
list of packages contained in this revision
most_recent
true if this is the most recent revision in the list that contains it

def __init__(self, revisions)

revisions
the list of Revision objects in this container

def __init__(self, name, path, status, log_url=None, return_code=None, input=None, output=None)

name
the name of the script file
path
path where the script file is located
status
output status of the script execution; can be one of the strings "failed", "finished",...
log_url
location where the log output of the script execution can be found
return_code
integer return code of the script execution
input
input files for the script as declared in the header
output
output files produced by the script as declared in the header
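
Putting the two halves together, a hypothetical Script record serialized for the website (all values are invented):

script = Script(
    name="example_validation.py",  # invented script name
    path="tracking/validation",    # invented path
    status="finished",
    return_code=0,
)
print(dumps(script))
# emits the script's attributes as a sorted, indented json object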