Belle II Software release-05-02-19
classification.py
1 #!/usr/bin/env python3
2 # -*- coding: utf-8 -*-
3 
4 import numpy as np
5 import collections
6 import numbers
7 import copy
8 
9 from . import scores
10 from . import statistics
11 
12 from .plot import ValidationPlot, compose_axis_label
13 from .fom import ValidationFiguresOfMerit
14 from .tolerate_missing_key_formatter import TolerateMissingKeyFormatter
15 
16 formatter = TolerateMissingKeyFormatter()
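# Note (illustrative sketch, not taken from this file): the tolerant formatter above
# lets plot-name templates be filled in two steps, leaving unknown keys in place
# instead of raising a KeyError, roughly like
#
#   template = "{quantity_name}_{subplot_name}"
#   formatter.format(template, quantity_name="pt")                         # -> "pt_{subplot_name}"
#   formatter.format("pt_{subplot_name}", subplot_name="purity_profile")   # -> "pt_purity_profile"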
17 
18 
19 class ClassificationAnalysis(object):
20  """Perform truth-classification analysis"""
21 
22  def __init__(
23  self,
24  contact,
25  quantity_name,
26  cut_direction=None,
27  cut=None,
28  lower_bound=None,
29  upper_bound=None,
30  outlier_z_score=None,
31  allow_discrete=None,
32  unit=None
33  ):
34  """Compare an estimated quantity to the truths by generating standardized validation plots."""
35 
36 
37  self._contact = contact
38 
39  self.quantity_name = quantity_name
40 
41 
42  self.plots = collections.OrderedDict()
43 
44  self.fom = None
45 
46 
47  self.cut_direction = cut_direction
48 
49  self.cut = cut
50 
51 
52  self.lower_bound = lower_bound
53 
54  self.upper_bound = upper_bound
55 
56  self.outlier_z_score = outlier_z_score
57 
58  self.allow_discrete = allow_discrete
59 
60  self.unit = unit
61 
62  def analyse(
63  self,
64  estimates,
65  truths,
66  auxiliaries={}
67  ):
68  """Compares the concrete estimate to the truth and efficiency, purity and background rejection
69  as figure of merit and plots the selection as a stacked plot over the truths.
70 
71  Parameters
72  ----------
73  estimates : array_like
74  Selection variable to compare to the truths
75  truths : array_like
76  Binary true class values.
77  """
78 
79  quantity_name = self.quantity_name
80  axis_label = compose_axis_label(quantity_name, self.unit)
81 
82  plot_name = "{quantity_name}_{subplot_name}"
83  plot_name = formatter.format(plot_name, quantity_name=quantity_name)
84 
85  signals = truths != 0
86 
87  # Which plots can be produced depends on the nature of the estimates
88  estimate_is_binary = statistics.is_binary_series(estimates)
89 
90  if estimate_is_binary:
91  binary_estimates = estimates != 0
92  cut_value = 0.5
93  cut_direction = -1 # reject low values
94 
95  elif self.cut is not None:
96  if isinstance(self.cut, numbers.Number):
97  cut_value = self.cut
98  cut_direction = self.cut_direction
99  cut_classifier = CutClassifier(cut_direction=cut_direction, cut_value=cut_value)
100 
101  else:
102  cut_classifier = self.cut
103  cut_classifier = cut_classifier.clone()
104 
105  cut_classifier.fit(estimates, truths)
106  binary_estimates = cut_classifier.predict(estimates) != 0
107  cut_direction = cut_classifier.cut_direction
108  cut_value = cut_classifier.cut_value
109 
110  if not isinstance(self.cut, numbers.Number):
111  print(formatter.format(plot_name, subplot_name="cut_classifier"), "summary")
112  cut_classifier.describe(estimates, truths)
113 
114  else:
115  cut_value = None
116  cut_direction = self.cut_direction
117 
118  lower_bound = self.lower_bound
119  upper_bound = self.upper_bound
120 
121  # Stacked histogram
122  signal_bkg_histogram_name = formatter.format(plot_name, subplot_name="signal_bkg_histogram")
123  signal_bkg_histogram = ValidationPlot(signal_bkg_histogram_name)
124  signal_bkg_histogram.hist(
125  estimates,
126  stackby=truths,
127  lower_bound=lower_bound,
128  upper_bound=upper_bound,
129  outlier_z_score=self.outlier_z_score,
130  allow_discrete=self.allow_discrete,
131  )
132  signal_bkg_histogram.xlabel = axis_label
133 
134  if lower_bound is None:
135  lower_bound = signal_bkg_histogram.lower_bound
136 
137  if upper_bound is None:
138  upper_bound = signal_bkg_histogram.upper_bound
139 
140  self.plots['signal_bkg'] = signal_bkg_histogram
141 
142  # Purity profile
143  purity_profile_name = formatter.format(plot_name, subplot_name="purity_profile")
144 
145  purity_profile = ValidationPlot(purity_profile_name)
146  purity_profile.profile(
147  estimates,
148  truths,
149  lower_bound=lower_bound,
150  upper_bound=upper_bound,
151  outlier_z_score=self.outlier_z_score,
152  allow_discrete=self.allow_discrete,
153  )
154 
155  purity_profile.xlabel = axis_label
156  purity_profile.ylabel = 'purity'
157  self.plots["purity"] = purity_profile
158 
159  # Try to guess the cut direction from the correlation
160  if cut_direction is None:
161  purity_grapherrors = ValidationPlot.convert_tprofile_to_tgrapherrors(purity_profile.plot)
162  correlation = purity_grapherrors.GetCorrelationFactor()
163  if correlation > 0.1:
164  print("Determined cut direction", -1)
165  cut_direction = -1 # reject low values
166  elif correlation < -0.1:
167  print("Determined cut direction", 1)
168  cut_direction = +1 # reject high values
169 
170  # Figures of merit
171  if cut_value is not None:
172  fom_name = formatter.format(plot_name, subplot_name="classification_figures_of_merits")
173  fom_description = "Efficiency, purity and background rejection of the classification with {quantity_name}".format(
174  quantity_name=quantity_name
175  )
176 
177  fom_check = "Check that the classification quality stays stable."
178 
179  fom_title = "Summary of the classification quality with {quantity_name}".format(
180  quantity_name=quantity_name
181  )
182 
183  classification_fom = ValidationFiguresOfMerit(
184  name=fom_name,
185  title=fom_title,
186  description=fom_description,
187  check=fom_check,
188  contact=self.contact,
189  )
190 
191  efficiency = scores.efficiency(truths, binary_estimates)
192  purity = scores.purity(truths, binary_estimates)
193  background_rejection = scores.background_rejection(truths, binary_estimates)
194 
195  classification_fom['cut_value'] = cut_value
196  classification_fom['cut_direction'] = cut_direction
197  classification_fom['efficiency'] = efficiency
198  classification_fom['purity'] = purity
199  classification_fom['background_rejection'] = background_rejection
200 
201  self.fom = classification_fom
202  # Auxiliary hists
203  for aux_name, aux_values in auxiliaries.items():
204  if statistics.is_single_value_series(aux_values) or aux_name == quantity_name:
205  continue
206 
207  aux_axis_label = compose_axis_label(aux_name)
208 
209  # Signal + bkg distribution over estimate and auxiliary variable #
210  # ############################################################## #
211  signal_bkg_aux_hist2d_name = formatter.format(plot_name, subplot_name=aux_name + '_signal_bkg_aux2d')
212  signal_bkg_aux_hist2d = ValidationPlot(signal_bkg_aux_hist2d_name)
213  signal_bkg_aux_hist2d.hist2d(
214  aux_values,
215  estimates,
216  stackby=truths,
217  lower_bound=(None, lower_bound),
218  upper_bound=(None, upper_bound),
219  outlier_z_score=self.outlier_z_score,
220  allow_discrete=self.allow_discrete,
221  )
222 
223  aux_lower_bound = signal_bkg_aux_hist2d.lower_bound[0]
224  aux_upper_bound = signal_bkg_aux_hist2d.upper_bound[0]
225 
226  signal_bkg_aux_hist2d.xlabel = aux_axis_label
227  signal_bkg_aux_hist2d.ylabel = axis_label
228  self.plots[signal_bkg_aux_hist2d_name] = signal_bkg_aux_hist2d
229 
230  # Figures of merit as function of the auxiliary variables
231  if cut_value is not None:
232 
233  # Auxiliary purity profile #
234  # ######################## #
235  aux_purity_profile_name = formatter.format(plot_name, subplot_name=aux_name + "_aux_purity_profile")
236  aux_purity_profile = ValidationPlot(aux_purity_profile_name)
237  aux_purity_profile.profile(
238  aux_values[binary_estimates],
239  truths[binary_estimates],
240  outlier_z_score=self.outlier_z_score,
241  allow_discrete=self.allow_discrete,
242  lower_bound=aux_lower_bound,
243  upper_bound=aux_upper_bound,
244  )
245 
246  aux_purity_profile.xlabel = aux_axis_label
247  aux_purity_profile.ylabel = 'purity'
248  self.plots[aux_purity_profile_name] = aux_purity_profile
249 
250  # Auxiliary efficiency profile #
251  # ############################ #
252  aux_efficiency_profile_name = formatter.format(plot_name, subplot_name=aux_name + "_aux_efficiency_profile")
253  aux_efficiency_profile = ValidationPlot(aux_efficiency_profile_name)
254  aux_efficiency_profile.profile(
255  aux_values[signals],
256  binary_estimates[signals],
257  outlier_z_score=self.outlier_z_score,
258  allow_discrete=self.allow_discrete,
259  lower_bound=aux_lower_bound,
260  upper_bound=aux_upper_bound,
261  )
262 
263  aux_efficiency_profile.xlabel = aux_axis_label
264  aux_efficiency_profile.ylabel = 'efficiency'
265  self.plots[aux_efficiency_profile_name] = aux_efficiency_profile
266 
267  # Auxiliary bkg rejection profile #
268  # ############################### #
269  aux_bkg_rejection_profile_name = formatter.format(plot_name, subplot_name=aux_name + "_aux_bkg_rejection_profile")
270  aux_bkg_rejection_profile = ValidationPlot(aux_bkg_rejection_profile_name)
271  aux_bkg_rejection_profile.profile(
272  aux_values[~signals],
273  ~binary_estimates[~signals],
274  outlier_z_score=self.outlier_z_score,
275  allow_discrete=self.allow_discrete,
276  lower_bound=aux_lower_bound,
277  upper_bound=aux_upper_bound,
278  )
279 
280  aux_bkg_rejection_profile.xlabel = aux_axis_label
281  aux_bkg_rejection_profile.ylabel = 'bkg rejection'
282  self.plots[aux_bkg_rejection_profile_name] = aux_bkg_rejection_profile
283 
284  cut_abs = False
285  if cut_direction is None:
286  purity_grapherrors = ValidationPlot.convert_tprofile_to_tgrapherrors(purity_profile.plot,
287  abs_x=True)
288  correlation = purity_grapherrors.GetCorrelationFactor()
289  if correlation > 0.1:
290  print("Determined absolute cut direction", -1)
291  cut_direction = -1 # reject low values
292  cut_abs = True
293  elif correlation < -0.1:
294  print("Determined absolute cut direction", 1)
295  cut_direction = +1 # reject high values
296  cut_abs = True
297 
298  if cut_abs:
299  estimates = np.abs(estimates)
300  cut_x_label = "cut " + compose_axis_label("abs(" + quantity_name + ")", self.unit)
301  lower_bound = 0
302  else:
303  cut_x_label = "cut " + axis_label
304 
305  # Quantile plots
306  if not estimate_is_binary and cut_direction is not None:
307  # Signal estimate quantiles over auxiliary variable #
308  # ################################################# #
309  if cut_direction > 0:
310  quantiles = [0.5, 0.90, 0.99]
311  else:
312  quantiles = [0.01, 0.10, 0.5]
313 
314  for aux_name, aux_values in auxiliaries.items():
315  if statistics.is_single_value_series(aux_values) or aux_name == quantity_name:
316  continue
317 
318  aux_axis_label = compose_axis_label(aux_name)
319 
320  signal_quantile_aux_profile_name = formatter.format(plot_name, subplot_name=aux_name + '_signal_quantiles_aux2d')
321  signal_quantile_aux_profile = ValidationPlot(signal_quantile_aux_profile_name)
322  signal_quantile_aux_profile.hist2d(
323  aux_values[signals],
324  estimates[signals],
325  quantiles=quantiles,
326  bins=('flat', None),
327  lower_bound=(None, lower_bound),
328  upper_bound=(None, upper_bound),
329  outlier_z_score=self.outlier_z_score,
330  allow_discrete=self.allow_discrete,
331  )
332  signal_quantile_aux_profile.xlabel = aux_axis_label
333  signal_quantile_aux_profile.ylabel = cut_x_label
334  self.plots[signal_quantile_aux_profile_name] = signal_quantile_aux_profile
335 
336  # ROC plots
337  if not estimate_is_binary and cut_direction is not None:
338  n_data = len(estimates)
339  n_signals = scores.signal_amount(truths, estimates)
340  n_bkgs = n_data - n_signals
341 
342  # Work around: numpy sorts NaN values as high, but depending on the cut direction they should count as low
343  if cut_direction < 0: # reject low
344  sorting_indices = np.argsort(-estimates)
345  else:
346  sorting_indices = np.argsort(estimates)
347 
348  sorted_truths = truths[sorting_indices]
349  sorted_estimates = estimates[sorting_indices]
350 
351  sorted_n_accepted_signals = np.cumsum(sorted_truths, dtype=float)
352  sorted_efficiencies = sorted_n_accepted_signals / n_signals
353 
354  sorted_n_rejected_signals = n_signals - sorted_n_accepted_signals
355  sorted_n_rejects = np.arange(len(estimates) + 1, 1, -1)
356  sorted_n_rejected_bkgs = sorted_n_rejects - sorted_n_rejected_signals
357  sorted_bkg_rejections = sorted_n_rejected_bkgs / n_bkgs
358 
359  # Efficiency by cut value #
360  # ####################### #
361  efficiency_by_cut_profile_name = formatter.format(plot_name, subplot_name="efficiency_by_cut")
362 
363  efficiency_by_cut_profile = ValidationPlot(efficiency_by_cut_profile_name)
364  efficiency_by_cut_profile.profile(
365  sorted_estimates,
366  sorted_efficiencies,
367  lower_bound=lower_bound,
368  upper_bound=upper_bound,
369  outlier_z_score=self.outlier_z_score,
370  allow_discrete=self.allow_discrete,
371  )
372 
373  efficiency_by_cut_profile.xlabel = cut_x_label
374  efficiency_by_cut_profile.ylabel = "efficiency"
375 
376  self.plots["efficiency_by_cut"] = efficiency_by_cut_profile
377 
378  # Background rejection over cut value #
379  # ################################### #
380  bkg_rejection_by_cut_profile_name = formatter.format(plot_name, subplot_name="bkg_rejection_by_cut")
381  bkg_rejection_by_cut_profile = ValidationPlot(bkg_rejection_by_cut_profile_name)
382  bkg_rejection_by_cut_profile.profile(
383  sorted_estimates,
384  sorted_bkg_rejections,
385  lower_bound=lower_bound,
386  upper_bound=upper_bound,
387  outlier_z_score=self.outlier_z_score,
388  allow_discrete=self.allow_discrete,
389  )
390 
391  bkg_rejection_by_cut_profile.xlabel = cut_x_label
392  bkg_rejection_by_cut_profile.ylabel = "background rejection"
393 
394  self.plots["bkg_rejection_by_cut"] = bkg_rejection_by_cut_profile
395 
396  # Purity over efficiency #
397  # ###################### #
398  purity_over_efficiency_profile_name = formatter.format(plot_name, subplot_name="purity_over_efficiency")
399  purity_over_efficiency_profile = ValidationPlot(purity_over_efficiency_profile_name)
400  purity_over_efficiency_profile.profile(
401  sorted_efficiencies,
402  sorted_truths,
403  cumulation_direction=1,
404  lower_bound=0,
405  upper_bound=1
406  )
407  purity_over_efficiency_profile.xlabel = 'efficiency'
408  purity_over_efficiency_profile.ylabel = 'purity'
409 
410  self.plots["purity_over_efficiency"] = purity_over_efficiency_profile
411 
412  # Cut over efficiency #
413  # ################### #
414  cut_over_efficiency_profile_name = formatter.format(plot_name, subplot_name="cut_over_efficiency")
415  cut_over_efficiency_profile = ValidationPlot(cut_over_efficiency_profile_name)
416  cut_over_efficiency_profile.profile(
417  sorted_efficiencies,
418  sorted_estimates,
419  lower_bound=0,
420  upper_bound=1,
421  outlier_z_score=self.outlier_z_score,
422  allow_discrete=self.allow_discrete,
423  )
424  cut_over_efficiency_profile.set_minimum(lower_bound)
425  cut_over_efficiency_profile.set_maximum(upper_bound)
426  cut_over_efficiency_profile.xlabel = 'efficiency'
427  cut_over_efficiency_profile.ylabel = cut_x_label
428 
429  self.plots["cut_over_efficiency"] = cut_over_efficiency_profile
430 
431  # Cut over bkg_rejection #
432  # ###################### #
433  cut_over_bkg_rejection_profile_name = formatter.format(plot_name, subplot_name="cut_over_bkg_rejection")
434  cut_over_bkg_rejection_profile = ValidationPlot(cut_over_bkg_rejection_profile_name)
435  cut_over_bkg_rejection_profile.profile(
436  sorted_bkg_rejections,
437  sorted_estimates,
438  lower_bound=0,
439  upper_bound=1,
440  outlier_z_score=self.outlier_z_score,
441  allow_discrete=self.allow_discrete,
442  )
443  cut_over_bkg_rejection_profile.set_minimum(lower_bound)
444  cut_over_bkg_rejection_profile.set_maximum(upper_bound)
445  cut_over_bkg_rejection_profile.xlabel = 'bkg_rejection'
446  cut_over_bkg_rejection_profile.ylabel = cut_x_label
447 
448  self.plots["cut_over_bkg_rejection"] = cut_over_bkg_rejection_profile
449 
450  # Efficiency over background rejection #
451  # #################################### #
452  efficiency_over_bkg_rejection_profile_name = formatter.format(plot_name, subplot_name="efficiency_over_bkg_rejection")
453  efficiency_over_bkg_rejection_profile = ValidationPlot(efficiency_over_bkg_rejection_profile_name)
454  efficiency_over_bkg_rejection_profile.profile(
455  sorted_bkg_rejections,
456  sorted_efficiencies,
457  lower_bound=0,
458  upper_bound=1
459  )
460 
461  efficiency_over_bkg_rejection_profile.xlabel = "bkg rejection"
462  efficiency_over_bkg_rejection_profile.ylabel = "efficiency"
463 
464  self.plots["efficiency_over_bkg_rejection"] = efficiency_over_bkg_rejection_profile
465 
466 
467  self.contact = self.contact  # reassign to forward the contact to the plots and figures of merit created above
468 
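# Sketch of the ROC bookkeeping used in analyse() above (values are made up):
# after sorting the estimates along the cut direction, a cumulative sum of the
# truth labels counts the signals accepted for every possible cut position,
# and dividing by the total signal count gives the efficiency at that cut, e.g.
#
#   sorted_truths = np.array([1, 1, 0, 1, 0])
#   np.cumsum(sorted_truths, dtype=float) / 3.0   # -> [0.33, 0.67, 0.67, 1.0, 1.0]
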
469  @property
470  def contact(self):
471  """Get the name of the contact person"""
472  return self._contact
473 
474  @contact.setter
475  def contact(self, contact):
476  """Set the name of the contact person"""
477  self._contact = contact
478 
479  for plot in list(self.plots.values()):
480  plot.contact = contact
481 
482  if self.fom:
483  self.fom.contact = contact
484 
485  def write(self, tdirectory=None):
486  """Write the plots to the ROOT TDirectory"""
487  for plot in list(self.plots.values()):
488  plot.write(tdirectory)
489 
490  if self.fom:
491  self.fom.write(tdirectory)
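
# Usage sketch (hypothetical names and values, assuming a ROOT output file):
#
#   import ROOT
#   import numpy as np
#
#   estimates = np.random.normal(size=1000)                 # e.g. an MVA output
#   truths = (np.random.uniform(size=1000) < 0.3).astype(float)
#
#   analysis = ClassificationAnalysis(
#       contact="tracking software group",                   # placeholder contact
#       quantity_name="track_quality",
#       cut_direction=-1,                                     # reject low values
#       cut=0.0,
#   )
#   analysis.analyse(estimates, truths)
#
#   tfile = ROOT.TFile("classification.root", "RECREATE")
#   analysis.write(tfile)
#   tfile.Close()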
492 
493 
494 class CutClassifier(object):
495 
496  """Simple classifier cutting on a single variable"""
497 
498  def __init__(self, cut_direction=1, cut_value=np.nan):
499  """Constructor"""
500 
501  self.cut_direction_ = cut_direction
502 
503  self.cut_value_ = cut_value
504 
505  @property
506  def cut_direction(self):
507  """Get the value of the cut direction"""
508  return self.cut_direction_
509 
510  @property
511  def cut_value(self):
512  """Get the value of the cut threshold"""
513  return self.cut_value_
514 
515  def clone(self):
516  """Return a clone of this object"""
517  return copy.copy(self)
518 
519  def determine_cut_value(self, estimates, truths):
520  """Get the value of the cut threshold"""
521  return self.cut_value_ # do not change cut value from constructed one
522 
523  def fit(self, estimates, truths):
524  """Fit to determine the cut threshold"""
525  self.cut_value_ = self.determine_cut_value(estimates, truths)
526  return self
527 
528  def predict(self, estimates):
529  """Select estimates that satisfy the cut"""
530  if self.cut_value_ is None:
531  raise ValueError("Cut value not set. Did you forget to call fit()?")
532 
533  if self.cut_direction_ < 0:
534  binary_estimates = estimates >= self.cut_value_
535  else:
536  binary_estimates = estimates <= self.cut_value_
537 
538  return binary_estimates
539 
540  def describe(self, estimates, truths):
541  """Describe the cut selection and its efficiency, purity and background rejection"""
542  if self.cut_direction_ < 0:
543  print("Cut accepts >= ", self.cut_value_, 'with')
544  else:
545  print("Cut accepts <= ", self.cut_value_, 'with')
546 
547  binary_estimates = self.predict(estimates)
548 
549  efficiency = scores.efficiency(truths, binary_estimates)
550  purity = scores.purity(truths, binary_estimates)
551  background_rejection = scores.background_rejection(truths, binary_estimates)
552 
553  print("efficiency", efficiency)
554  print("purity", purity)
555  print("background_rejection", background_rejection)
556 
557 
558 def cut_at_background_rejection(background_rejection=0.5, cut_direction=1):
559  return CutAtBackgroundRejectionClassifier(background_rejection, cut_direction)
560 
561 
563  """Apply cut on the background rejection"""
564 
565  def __init__(self, background_rejection=0.5, cut_direction=1):
566  """Constructor"""
567  super(CutAtBackgroundRejectionClassifier, self).__init__(cut_direction=cut_direction, cut_value=np.nan)
568 
569  self.background_rejection = background_rejection
570 
571  def determine_cut_value(self, estimates, truths):
572  """Find the cut value that satisfies the desired background-rejection level"""
573  n_data = len(estimates)
574  n_signals = scores.signal_amount(truths, estimates)
575  n_bkgs = n_data - n_signals
576 
577  sorting_indices = np.argsort(estimates)
578  if self.cut_direction_ < 0: # reject low
579  # Keep a reference to keep the content alive
580  original_sorting_indices = sorting_indices
581  sorting_indices = sorting_indices[::-1]
582 
583  sorted_truths = truths[sorting_indices]
584  sorted_estimates = estimates[sorting_indices]
585 
586  sorted_n_accepted_signals = np.cumsum(sorted_truths, dtype=float)
587  sorted_efficiencies = sorted_n_accepted_signals / n_signals
588 
589  sorted_n_rejected_signals = n_signals - sorted_n_accepted_signals
590  sorted_n_rejects = np.arange(len(estimates) + 1, 1, -1)
591  sorted_n_rejected_bkgs = sorted_n_rejects - sorted_n_rejected_signals
592  sorted_bkg_rejections = sorted_n_rejected_bkgs / n_bkgs
593 
594  cut_index, = np.searchsorted(sorted_bkg_rejections[::-1], (self.background_rejection,), side='right')
595 
596  cut_value = sorted_estimates[-cut_index - 1]
597  return cut_value
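
# Usage sketch (hypothetical quantity name): instead of a fixed threshold, a
# classifier that determines its cut from a desired background rejection can be
# passed as the cut argument; ClassificationAnalysis clones and fits it on the
# provided estimates and truths.
#
#   cut_classifier = cut_at_background_rejection(background_rejection=0.9, cut_direction=1)
#   analysis = ClassificationAnalysis(
#       contact="tracking software group",
#       quantity_name="impact_parameter",
#       cut=cut_classifier,
#   )
#   analysis.analyse(estimates, truths)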