# Belle II Software release-06-00-14
# flavorTaggerEfficiency.py
1 #!/usr/bin/env python3
2 # -*- coding: utf-8 -*-
3 
4 
11 
12 # ************** Flavor Tagging **************
13 # * *
14 # * This script calculates the parameters char- *
15 # * acterizing the performance of the flavor *
16 # * tagger. It produces plots for the qr *
17 # * distribution of the combiners and the dis- *
18 # * tributions of the combiner input variables. *
19 # * *
20 # ***********************************************
21 
22 import ROOT
23 from basf2 import B2INFO
24 import flavorTagger as ft
25 from defaultEvaluationParameters import categories, Quiet, r_subsample, r_size
26 from array import array
27 import pickle
28 import math
29 import glob
30 import sys
31 
# Run ROOT in batch mode: no graphics windows are opened when canvases are drawn/saved.
ROOT.gROOT.SetBatch(True)

# Command line: <script> [input_sim_file | 'input_sim_file*' with wildcards] [treeName]
if len(sys.argv) != 3:
    sys.exit("Must provide 2 arguments: [input_sim_file] or ['input_sim_file*'] with wildcards and [treeName]")

workingFile = sys.argv[1]          # file name or glob pattern (argv entries are already str)
workingFiles = glob.glob(workingFile)
treeName = sys.argv[2]

# glob returns an empty list when the pattern matches nothing
if not workingFiles:
    sys.exit(f"No file name or file names {workingFile} found.")


workingDirectory = '.'  # output pdfs are written here
46 
47 #
48 # *******************************************
49 # DETERMINATION OF TOTAL EFFECTIVE EFFICIENCY
50 # *******************************************
51 #
52 
53 # working directory
54 # needs the B0bar_final.root-file
55 # treeName = 'B0tree'
56 
57 
# Chain all matching input files under the requested tree name.
tree = ROOT.TChain(treeName)

# B0/B0bar MC-status bin edges (PDG code +-511); kept for reference.
mcstatus = array('d', [-511.5, 0.0, 511.5])
ROOT.TH1.SetDefaultSumw2()

for inputFile in workingFiles:
    tree.AddFile(inputFile)

# Names of all branches available in the chained tree.
totalBranches = [branch.GetName() for branch in tree.GetListOfBranches()]

# A combiner method was run iff its qrCombined branch exists.
methods = [tagger for tagger in ("FBDT", "FANN", "DNN")
           if tagger + '_qrCombined' in totalBranches]

# Categories actually present in the ntuple (their qp* branch exists).
usedCategories = [cat for cat in categories if 'qp' + cat in totalBranches]

if len(usedCategories) > 1:
    ft.WhichCategories(usedCategories)

# Common y-axis maximum shared by the qr plots of all methods.
YmaxForQrPlot = 0

total_notTagged = 0
93 
# Evaluate every available combiner method: fill the qr histograms, compute the
# per-r-bin wrong-tag fractions and effective efficiencies with uncertainties,
# print the summary tables (console + LaTeX), and save the qr/calibration pdfs.
for method in methods:
    # Per-bin average dilution <r> (bin errors via Get/SetBinError); the averages
    # are produced below by dividing the weighted sums by the per-bin entry counts.
    histo_avr_r = ROOT.TH1F('Average_r', 'Average r in each of the bins (B0 and B0bar)', int(r_size - 2),
                            r_subsample)
    histo_avr_rB0 = ROOT.TH1F('Average_rB0', 'Average r in each of the bins (B0)', int(r_size - 2),
                              r_subsample)
    histo_avr_rB0bar = ROOT.TH1F('Average_rB0bar', 'Average r in each of the bins (B0bar)', int(r_size - 2),
                                 r_subsample)

    # Wrong-tag counts per r bin, obtained by counting events with qr*qrMC < 0.
    histo_mc_NwB0 = ROOT.TH1F('mc_NwB0', 'Average r in each of the bins (B0)', int(r_size - 2),
                              r_subsample)
    histo_mc_NwB0bar = ROOT.TH1F('mc_NwB0bar', 'Average r in each of the bins (B0bar)', int(r_size - 2),
                                 r_subsample)

    # Mean squared r per bin -> used for the standard error of <r> below.
    histo_ms_r = ROOT.TH1F('MS_r', 'Mean squared average of r in each of the bins (B0 and B0bar)', int(r_size - 2),
                           r_subsample)
    histo_ms_rB0 = ROOT.TH1F('MS_rB0', 'Mean squared average of r in each of the bins (B0)', int(r_size - 2),
                             r_subsample)
    histo_ms_rB0bar = ROOT.TH1F('MS_rB0bar', 'Mean squared average of r in each of the bins (B0bar)', int(r_size - 2),
                                r_subsample)

    # Number of entries per r bin (denominators for the averages above).
    histo_entries_per_bin = ROOT.TH1F(
        'entries_per_bin',
        'Events binned in r_subsample according to their r-value for B0 and B0bar prob',
        int(r_size - 2),
        r_subsample)
    histo_entries_per_binB0 = ROOT.TH1F('entries_per_binB0', 'Events binned in r_subsample according '
                                        'to their r-value for B0 prob', int(r_size - 2), r_subsample)
    histo_entries_per_binB0bar = ROOT.TH1F('entries_per_binB0bar',
                                           'Events binned in r_subsample according to their r-value '
                                           'for B0bar prob', int(r_size - 2), r_subsample)
    # Raw combiner network output (not qr and not r) for true B0 (signal) - not necessary.
    histo_Cnet_output_B0 = ROOT.TH1F('Comb_Net_Output_B0', 'Combiner network output [not equal to r] '
                                     'for true B0 (binning 100)', 100, 0.0, 1.0)
    # Raw combiner network output (not qr and not r) for true B0bar (background) - not necessary.
    histo_Cnet_output_B0bar = ROOT.TH1F('Comb_Net_Output_B0bar', 'Combiner network output [not equal to r] '
                                        'for true B0bar (binning 100)', 100, 0.0, 1.0)
    # Belle-paper-style plot: qr tagger output for true B0.
    histo_belleplotB0 = ROOT.TH1F('BellePlot_B0',
                                  'BellePlot for true B0 (binning 50)', 50,
                                  -1.0, 1.0)
    # Belle-paper-style plot: qr tagger output for true B0bar.
    histo_belleplotB0bar = ROOT.TH1F('BellePlot_B0Bar',
                                     'BellePlot for true B0Bar (binning 50)',
                                     50, -1.0, 1.0)

    # Untagged events: selected below via qrCombined < -1, collected in a single
    # bin covering [-3, -1] (the output carries a sentinel value below -1).
    histo_notTaggedEvents = ROOT.TH1F('notTaggedEvents',
                                      'Histogram for not tagged events',
                                      1, -3.0, -1.0)

    # Calibration plot for B0. If we get a linear line our MC is fine, then the
    # assumption r ~ 1 - 2w is reasonable. Expectation for the B0 calibration
    # plot: qr = 0 -> half B0 and half B0bar, qr = 1 -> only B0, qr = -1 ->
    # no B0. Inverse for the B0bar calibration plot.
    histo_calib_B0 = ROOT.TH1F('Calibration_B0', 'CalibrationPlot for true B0', 100, -1.0, 1.0)
    # Calibration plot for B0bar.
    histo_calib_B0bar = ROOT.TH1F('Calibration_B0Bar',
                                  'CalibrationPlot for true B0Bar', 100, -1.0,
                                  1.0)
    # Belle plot with both true B0 and B0bar; also the denominator of the calibration plots.
    hallo12 = ROOT.TH1F('BellePlot_NoCut', 'BellePlot_NoCut (binning 100)',
                        100, -1.0, 1.0)

    # First-order polynomial used to fit the calibration plot.
    diag = ROOT.TF1('diag', 'pol1', -1, 1)

    # Histograms for the naive ("wrong way") efficiency calculation.
    histo_m0 = ROOT.TH1F('BellePlot_m0',
                         'BellePlot_m for true B0 (binning 50)', 50, -1.0, 1.0)
    histo_m1 = ROOT.TH1F('BellePlot_m1',
                         'BellePlot_m for true B0 (binning 50)', 50, -1.0, 1.0)
    histo_m2 = ROOT.TH1F('BellePlot_m2',
                         'BellePlot_m for true B0Bar (binning 50)', 50, -1.0,
                         1.0)

    # Fill the histograms straight from the tree (qrMC is the MC-truth flavor).
    tree.Draw(method + '_qrCombined>>BellePlot_B0', 'qrMC == 1')
    tree.Draw(method + '_qrCombined>>BellePlot_B0Bar', 'qrMC == -1')
    tree.Draw(method + '_qrCombined>>BellePlot_NoCut', 'abs(qrMC) == 1')

    tree.Draw(method + '_qrCombined>>Calibration_B0', 'qrMC == 1')
    tree.Draw(method + '_qrCombined>>Calibration_B0Bar', 'qrMC == -1')

    # Untagged signal events: combiner output below -1.
    tree.Draw(method + '_qrCombined>>notTaggedEvents',
              'abs(qrMC) == 0 && isSignal == 1 && ' +
              method + '_qrCombined < -1')

    # Fill the histograms for the naive efficiency calculation.
    tree.Draw(method + '_qrCombined>>BellePlot_m0',
              'qrMC == 1 && ' + method + '_qrCombined>0')
    tree.Draw(method + '_qrCombined>>BellePlot_m1',
              'qrMC == 1 && ' + method + '_qrCombined<0')
    tree.Draw(method + '_qrCombined>>BellePlot_m2',
              'qrMC == -1 && ' + method + '_qrCombined>0 ')

    # Fill abs(qr) into one of the r bins weighted by abs(qr) (numerators of <r>),
    # calculated separately for B0 and B0bar.
    tree.Project('Average_r', 'abs(' + method + '_qrCombined)', 'abs(' + method + '_qrCombined)*(abs(qrMC) == 1)')
    tree.Project('Average_rB0', 'abs(' + method + '_qrCombined)', 'abs(' + method + '_qrCombined)*(qrMC==1)')
    tree.Project('Average_rB0bar', 'abs(' + method + '_qrCombined)', 'abs(' + method + '_qrCombined)*(qrMC==-1)')

    # Same, weighted by qr^2 (numerators of the mean squared r).
    tree.Project('MS_r', 'abs(' + method + '_qrCombined)', '(' + method +
                 '_qrCombined*' + method + '_qrCombined)*(abs(qrMC) == 1)')
    tree.Project('MS_rB0', 'abs(' + method + '_qrCombined)',
                 '(' + method + '_qrCombined*' + method + '_qrCombined)*(qrMC==1)')
    tree.Project('MS_rB0bar', 'abs(' + method + '_qrCombined)',
                 '(' + method + '_qrCombined*' + method + '_qrCombined)*(qrMC==-1)')

    # Unweighted abs(qr): per-bin entry counts (denominators of the averages).
    tree.Project('entries_per_bin', 'abs(' + method + '_qrCombined)', 'abs(qrMC) == 1')
    tree.Project('entries_per_binB0', 'abs(' + method + '_qrCombined)', 'qrMC == 1')
    tree.Project('entries_per_binB0bar', 'abs(' + method + '_qrCombined)', 'qrMC == -1')

    # Number of wrong tags per r bin: qr and qrMC have opposite sign.
    tree.Project('mc_NwB0', 'abs(' + method + '_qrCombined)', ' ' + method + '_qrCombined*qrMC < 0 && qrMC == 1')
    tree.Project('mc_NwB0bar', 'abs(' + method + '_qrCombined)', ' ' + method + '_qrCombined*qrMC < 0 && qrMC == -1')

    # Produce the average-r histograms (weighted sum / entry count per bin).
    histo_avr_r.Divide(histo_entries_per_bin)
    histo_avr_rB0.Divide(histo_entries_per_binB0)
    histo_avr_rB0bar.Divide(histo_entries_per_binB0bar)

    histo_ms_r.Divide(histo_entries_per_bin)
    histo_ms_rB0.Divide(histo_entries_per_binB0)
    histo_ms_rB0bar.Divide(histo_entries_per_binB0bar)

    # Produce the calibration plots: fraction of true B0 (B0bar) per qr bin.
    # Errors are propagated by the Sumw2 machinery enabled above.
    histo_calib_B0.Divide(hallo12)
    histo_calib_B0bar.Divide(hallo12)

    # Fit the B0 calibration plot with a first-order polynomial; a slope of
    # ~0.5 is expected if r ~ 1 - 2w holds.
    print(' ')
    print('****************************** CALIBRATION CHECK FOR COMBINER USING ' +
          method + ' ***************************************')
    print(' ')
    print('Fit ploynomial of first order to the calibration plot. Expected value ~0.5')
    print(' ')
    histo_calib_B0.Fit(diag, 'TEST')
    print(' ')
    print('****************************** MEASURED EFFECTIVE EFFICIENCY FOR COMBINER USING ' +
          method + ' ***********************************')
    print('* ' +
          ' *')
    # Total event counts from the filled histograms.
    total_tagged = histo_entries_per_bin.GetEntries()
    total_tagged_B0 = histo_entries_per_binB0.GetEntries()
    total_tagged_B0bar = histo_entries_per_binB0bar.GetEntries()
    total_notTagged = histo_notTaggedEvents.GetEntries()
    total_entries = (total_tagged + total_notTagged)
    # To a good approximation we assume that half of the not tagged B mesons were B0 (B0bar).
    total_entriesB0 = (total_tagged_B0 + total_notTagged / 2)
    total_entriesB0bar = (total_tagged_B0bar + total_notTagged / 2)

    # Tagging efficiency and its (binomial) uncertainty.
    tagging_eff = total_tagged / (total_tagged + total_notTagged)
    DeltaTagging_eff = math.sqrt(total_tagged * total_notTagged**2 + total_notTagged * total_tagged**2) / (total_entries**2)
    # Accumulators for the totals over all r bins.
    tot_eff_effB0 = 0
    tot_eff_effB0bar = 0
    average_eff_eff = 0
    uncertainty_eff_effB0 = 0
    uncertainty_eff_effB0bar = 0
    uncertainty_eff_effAverage = 0
    diff_eff_Uncertainty = 0
    # Per-bin quantities; index 0 is unused, bins run from 1 to r_size - 1.
    event_fractionB0 = array('f', [0] * r_size)
    event_fractionB0bar = array('f', [0] * r_size)
    event_fractionTotal = array('f', [0] * r_size)
    event_fractionTotalUncertainty = array('f', [0] * r_size)
    eventsInBin_B0 = array('f', [0] * r_size)
    eventsInBin_B0bar = array('f', [0] * r_size)
    eventsInBin_Total = array('f', [0] * r_size)
    event_fractionDiff = array('f', [0] * r_size)
    event_fractionDiffUncertainty = array('f', [0] * r_size)
    rvalueB0 = array('f', [0] * r_size)
    rvalueB0bar = array('f', [0] * r_size)
    rvalueB0Average = array('f', [0] * r_size)
    rvalueStdB0 = array('f', [0] * r_size)
    rvalueStdB0bar = array('f', [0] * r_size)
    rvalueStdB0Average = array('f', [0] * r_size)
    wvalue = array('f', [0] * r_size)
    wvalueUncertainty = array('f', [0] * r_size)
    wvalueB0 = array('f', [0] * r_size)
    wvalueB0bar = array('f', [0] * r_size)
    wvalueB0Uncertainty = array('f', [0] * r_size)
    wvalueB0barUncertainty = array('f', [0] * r_size)
    wvalueDiff = array('f', [0] * r_size)
    wvalueDiffUncertainty = array('f', [0] * r_size)
    entries = array('f', [0] * r_size)
    entriesB0 = array('f', [0] * r_size)
    entriesB0bar = array('f', [0] * r_size)
    iEffEfficiency = array('f', [0] * r_size)
    iEffEfficiencyUncertainty = array('f', [0] * r_size)
    iEffEfficiencyB0Uncertainty = array('f', [0] * r_size)
    iEffEfficiencyB0barUncertainty = array('f', [0] * r_size)
    iEffEfficiencyB0UncertaintyFromOutput = array('f', [0] * r_size)
    iEffEfficiencyB0barUncertaintyFromOutput = array('f', [0] * r_size)

    iDeltaEffEfficiency = array('f', [0] * r_size)
    iDeltaEffEfficiencyUncertainty = array('f', [0] * r_size)
    muParam = array('f', [0] * r_size)
    muParamUncertainty = array('f', [0] * r_size)
    # intervallEff = array('f', [0] * r_size)

    print('* --> DETERMINATION BASED ON MONTE CARLO ' +
          ' *')
    print('* ' +
          ' *')
    print('* Note: mu = Delta_Effcy/(2*Efficiency). Needed for CP analysis ' +
          'together with w and Delta_w *')
    print('* ' +
          ' *')
    print('* ------------------------------------------------------------------' +
          '-------------------------------------------------- *')
    print('* r-interval <r> Efficiency Delta_Effcy ' +
          ' mu w Delta_w *')
    print('* ------------------------------------------------------------------' +
          '-------------------------------------------------- *')
    performance = []
    for i in range(1, r_size):
        # Per-bin entry counts.
        entries[i] = histo_entries_per_bin.GetBinContent(i)
        entriesB0[i] = histo_entries_per_binB0.GetBinContent(i)
        entriesB0bar[i] = histo_entries_per_binB0bar.GetBinContent(i)

        # Fraction of events in this bin relative to all (tagged + half of
        # untagged) events of that flavor.
        # NOTE(review): raises ZeroDivisionError if a flavor sample is empty —
        # the script assumes a populated MC sample.
        event_fractionB0[i] = entriesB0[i] / total_entriesB0
        event_fractionB0bar[i] = entriesB0bar[i] / total_entriesB0bar

        # event_fractionTotal[i] = (entriesB0[i] + entriesB0bar[i]) / total_entries
        # event_fractionDiff[i] = (entriesB0[i] - entriesB0bar[i]) / total_entries

        event_fractionTotal[i] = (event_fractionB0[i] + event_fractionB0bar[i]) / 2
        event_fractionDiff[i] = event_fractionB0[i] - event_fractionB0bar[i]

        # Binomial uncertainty on the event-fraction difference.
        event_fractionDiffUncertainty[i] = math.sqrt(entriesB0[i] *
                                                     (total_entriesB0 -
                                                      entriesB0[i]) /
                                                     total_entriesB0**3 +
                                                     entriesB0bar[i] *
                                                     (total_entriesB0bar -
                                                      entriesB0bar[i]) /
                                                     total_entriesB0bar**3)

        event_fractionTotalUncertainty[i] = event_fractionDiffUncertainty[i] / 2

        # Average dilution <r> per bin and its standard error (from <r^2> - <r>^2).
        rvalueB0[i] = histo_avr_rB0.GetBinContent(i)
        rvalueB0bar[i] = histo_avr_rB0bar.GetBinContent(i)
        rvalueB0Average[i] = histo_avr_r.GetBinContent(i)  # (rvalueB0[i] + rvalueB0bar[i]) / 2
        rvalueStdB0[i] = math.sqrt(histo_ms_rB0.GetBinContent(
            i) - (histo_avr_rB0.GetBinContent(i))**2) / math.sqrt(entriesB0[i] - 1)
        rvalueStdB0bar[i] = math.sqrt(histo_ms_rB0bar.GetBinContent(
            i) - (histo_avr_rB0bar.GetBinContent(i))**2) / math.sqrt(entriesB0bar[i] - 1)
        rvalueStdB0Average[i] = math.sqrt(rvalueStdB0[i]**2 + rvalueStdB0bar[i]**2) / 2
        # math.sqrt(histo_ms_r.GetBinContent(i) - (histo_avr_r.GetBinContent(i))**2)

        # Calculate the wrong-tag fraction per bin from the MC-truth wrong-tag
        # counts (only valid if the MC data is good).
        wvalueB0[i] = histo_mc_NwB0.GetBinContent(i) / entriesB0[i]  # (1 - rvalueB0[i]) / 2
        wvalueB0bar[i] = histo_mc_NwB0bar.GetBinContent(i) / entriesB0bar[i]  # (1 - rvalueB0bar[i]) / 2
        wvalueDiff[i] = wvalueB0[i] - wvalueB0bar[i]
        # Binomial uncertainties on the wrong-tag fractions.
        wvalueB0Uncertainty[i] = math.sqrt(histo_mc_NwB0.GetBinContent(
            i) * (entriesB0[i] - histo_mc_NwB0.GetBinContent(i)) / (entriesB0[i]**3))
        wvalueB0barUncertainty[i] = math.sqrt(histo_mc_NwB0bar.GetBinContent(
            i) * (entriesB0bar[i] - histo_mc_NwB0bar.GetBinContent(i)) / (entriesB0bar[i]**3))
        # math.sqrt((rvalueStdB0[i] / 2)**2 + (rvalueStdB0bar[i] / 2)**2)
        wvalueDiffUncertainty[i] = math.sqrt(wvalueB0Uncertainty[i]**2 + wvalueB0barUncertainty[i]**2)
        wvalue[i] = (wvalueB0[i] + wvalueB0bar[i]) / 2
        wvalueUncertainty[i] = wvalueDiffUncertainty[i] / 2

        # Average per-bin effective efficiency: eps_i * (1 - 2 w_i)^2.

        # iEffEfficiency[i] = (event_fractionB0[i] * rvalueB0[i] * rvalueB0[i] +
        # event_fractionB0bar[i] * rvalueB0bar[i] * rvalueB0bar[i]) / 2

        iEffEfficiency[i] = event_fractionTotal[i] * (1 - 2 * wvalue[i])**2

        # NOTE(review): the leading (1 - 2w) factor sits outside the sqrt, so
        # this "uncertainty" goes negative for w > 0.5 — presumably intended as
        # error propagation of eps*(1-2w)^2; confirm against the derivation.
        iEffEfficiencyUncertainty[i] = (1 - 2 * wvalue[i]) * \
            math.sqrt((2 * event_fractionTotal[i] * 2 * wvalueUncertainty[i])**2 +
                      (1 - 2 * wvalue[i])**2 * event_fractionTotalUncertainty[i]**2)

        # iEffEfficiencyUncertainty[i] = rvalueB0Average[i] * \
        # math.sqrt((2 * total_entries * entries[i] * rvalueStdB0Average[i])**2 +
        # rvalueB0Average[i]**2 * entries[i] *
        # (total_entries * (total_entries - entries[i]) +
        # entries[i] * total_notTagged)) / (total_entries**2)

        # iEffEfficiencyB0UncertaintyFromOutput[i] = rvalueB0[i] * \
        # math.sqrt((2 * total_entriesB0 * entriesB0[i] * rvalueStdB0[i])**2 +
        # rvalueB0[i]**2 * entriesB0[i] *
        # total_entriesB0 * (total_entriesB0 - entriesB0[i])) / (total_entriesB0**2)
        # iEffEfficiencyB0barUncertaintyFromOutput[i] = rvalueB0bar[i] * \
        # math.sqrt((2 * total_entriesB0bar * entriesB0bar[i] * rvalueStdB0bar[i])**2 +
        # rvalueB0bar[i]**2 * entriesB0bar[i] *
        # total_entriesB0bar * (total_entriesB0bar - entriesB0bar[i])) / (total_entriesB0bar**2)

        # iEffEfficiencyUncertainty[i] =
        # math.sqrt(iEffEfficiencyB0UncertaintyFromOutput[i]**2 +
        # iEffEfficiencyB0barUncertaintyFromOutput[i]**2) / 2

        # iEffEfficiency[i] = (event_fractionB0[i] * (1-2*wvalueB0[i])**2 + event_fractionB0bar[i] * (1-2*wvalueB0bar[i])**2)/2

        average_eff_eff += iEffEfficiency[i]
        # average_eff_eff += (event_fractionB0[i] * (1-2*wvalueB0[i])**2 + event_fractionB0bar[i] * (1-2*wvalueB0bar[i])**2)/2

        # Per-bin effective-efficiency difference between B0 and B0bar.
        iDeltaEffEfficiency[i] = event_fractionB0[i] * (1 - 2 * wvalueB0[i])**2 - \
            event_fractionB0bar[i] * (1 - 2 * wvalueB0bar[i])**2

        iEffEfficiencyB0Uncertainty[i] = (1 - 2 * wvalueB0[i]) * \
            math.sqrt((2 * total_entriesB0 * entriesB0[i] * 2 * wvalueB0Uncertainty[i])**2 +
                      (1 - 2 * wvalueB0[i])**2 * entriesB0[i] *
                      total_entriesB0 * (total_entriesB0 - entriesB0[i])) / (total_entriesB0**2)
        iEffEfficiencyB0barUncertainty[i] = (1 - 2 * wvalueB0bar[i]) * \
            math.sqrt((2 * total_entriesB0bar * entriesB0bar[i] * 2 * wvalueB0barUncertainty[i])**2 +
                      (1 - 2 * wvalueB0bar[i])**2 * entriesB0bar[i] *
                      total_entriesB0bar * (total_entriesB0bar - entriesB0bar[i])) / (total_entriesB0bar**2)

        iDeltaEffEfficiencyUncertainty[i] = math.sqrt(iEffEfficiencyB0Uncertainty[i]**2 + iEffEfficiencyB0barUncertainty[i]**2)
        # iEffEfficiencyUncertainty[i] = iDeltaEffEfficiencyUncertainty[i]/2

        diff_eff_Uncertainty = diff_eff_Uncertainty + iDeltaEffEfficiencyUncertainty[i]**2

        # Finally accumulate the total effective efficiencies (in quadrature for
        # the uncertainties).
        tot_eff_effB0 = tot_eff_effB0 + event_fractionB0[i] * (1 - 2 * wvalueB0[i])**2
        tot_eff_effB0bar = tot_eff_effB0bar + event_fractionB0bar[i] * (1 - 2 * wvalueB0bar[i])**2
        uncertainty_eff_effAverage = uncertainty_eff_effAverage + iEffEfficiencyUncertainty[i]**2
        uncertainty_eff_effB0 = uncertainty_eff_effB0 + iEffEfficiencyB0Uncertainty[i]**2
        uncertainty_eff_effB0bar = uncertainty_eff_effB0bar + iEffEfficiencyB0barUncertainty[i]**2
        # mu parameter: tagging-efficiency asymmetry per bin (see note above the table).
        muParam[i] = event_fractionDiff[i] / (2 * event_fractionTotal[i])
        muParamUncertainty[i] = event_fractionDiffUncertainty[i] / (2 * event_fractionTotal[i]) * math.sqrt(muParam[i]**2 + 1)

        # intervallEff[i] = event_fractionTotal[i] * rvalueB0Average[i] * rvalueB0Average[i]
        print('* ' + '{:.3f}'.format(r_subsample[i - 1]) + ' - ' + '{:.3f}'.format(r_subsample[i]) + ' ' +
              '{:.3f}'.format(rvalueB0Average[i]) + ' +- ' + '{:.4f}'.format(rvalueStdB0Average[i]) + ' ' +
              '{:.4f}'.format(event_fractionTotal[i]) + ' ' +
              '{: .4f}'.format(event_fractionDiff[i]) + ' +- ' + '{:.4f}'.format(event_fractionDiffUncertainty[i]) + ' ' +
              '{: .4f}'.format(muParam[i]) + ' +- ' + '{:.4f}'.format(muParamUncertainty[i]) + ' ' +
              '{:.4f}'.format(wvalue[i]) + ' +- ' + '{:.4f}'.format(wvalueUncertainty[i]) + ' ' +
              '{: .4f}'.format(wvalueDiff[i]) + ' +- ' + '{:.4f}'.format(wvalueDiffUncertainty[i]) + ' *')

    # average_eff_eff = (tot_eff_effB0 + tot_eff_effB0bar) / 2
    # Quadrature sums -> final uncertainties.
    uncertainty_eff_effAverage = math.sqrt(uncertainty_eff_effAverage)
    uncertainty_eff_effB0 = math.sqrt(uncertainty_eff_effB0)
    uncertainty_eff_effB0bar = math.sqrt(uncertainty_eff_effB0bar)
    diff_eff = tot_eff_effB0 - tot_eff_effB0bar
    diff_eff_Uncertainty = math.sqrt(diff_eff_Uncertainty)
    print('* --------------------------------------------------------------------------------------------------' +
          '------------------ *')
    print('* *')
    print('* __________________________________________________________________________________________ *')
    print('* | | *')
    print('* | TOTAL NUMBER OF TAGGED EVENTS = ' +
          '{:<24}'.format("%.0f" % total_tagged) + '{:>36}'.format('| *'))
    print('* | | *')
    print(
        '* | TOTAL AVERAGE EFFICIENCY (q=+-1)= ' +
        '{:.2f}'.format(
            tagging_eff *
            100) +
        " +- " +
        '{:.2f}'.format(
            DeltaTagging_eff *
            100) +
        ' % | *')
    print('* | | *')
    print(
        '* | TOTAL AVERAGE EFFECTIVE EFFICIENCY (q=+-1)= ' +
        '{:.6f}'.format(
            average_eff_eff *
            100) +
        " +- " +
        '{:.6f}'.format(
            uncertainty_eff_effAverage *
            100) +
        ' % | *')
    print('* | | *')
    print(
        '* | TOTAL AVERAGE EFFECTIVE EFFICIENCY ASYMMETRY (q=+-1)= ' +
        '{:^9.6f}'.format(
            diff_eff *
            100) +
        " +- " +
        '{:.6f}'.format(
            diff_eff_Uncertainty *
            100) +
        ' % | *')
    print('* | | *')
    print('* | B0-TAGGER TOTAL EFFECTIVE EFFICIENCIES: ' +
          '{:.2f}'.format(tot_eff_effB0 * 100) + " +-" + '{: 4.2f}'.format(uncertainty_eff_effB0 * 100) +
          ' % (q=+1) ' +
          '{:.2f}'.format(tot_eff_effB0bar * 100) + " +-" + '{: 4.2f}'.format(uncertainty_eff_effB0bar * 100) +
          ' % (q=-1) ' + ' | *')
    print('* | | *')
    print('* | FLAVOR PERCENTAGE (MC): ' +
          '{:.2f}'.format(total_tagged_B0 / total_tagged * 100) + ' % (q=+1) ' +
          '{:.2f}'.format(total_tagged_B0bar / total_tagged * 100) + ' % (q=-1) Diff=' +
          '{:^5.2f}'.format((total_tagged_B0 - total_tagged_B0bar) / total_tagged * 100) + ' % | *')
    print('* |__________________________________________________________________________________________| *')
    print('* *')
    print('****************************************************************************************************')
    print('* *')

    # Not that important: naive cross-check of the efficiency using simple
    # right/wrong counting (the "easiest way").
    print('* --------------------------------- *')
    print('* Efficiency Determination - easiest way *')
    print('* --------------------------------- *')
    total_tagged_B0 = histo_belleplotB0.GetEntries()
    total_tagged_B0Bar = histo_belleplotB0bar.GetEntries()
    total_tagged_wrong = histo_m1.GetEntries()
    total_tagged_B0Bar_wrong = histo_m2.GetEntries()
    total_tagged = total_tagged_B0 + total_tagged_B0Bar
    total_tagged_wrong = total_tagged_wrong + total_tagged_B0Bar_wrong

    wrong_tag_fraction_B0 = total_tagged_wrong / total_tagged_B0
    wrong_tag_fraction_B0Bar = total_tagged_B0Bar_wrong / total_tagged_B0Bar
    wrong_tag_fraction = total_tagged_wrong / total_tagged
    right_tag_fraction_B0 = 1 - 2 * wrong_tag_fraction_B0
    right_tag_fraction_B0Bar = 1 - 2 * wrong_tag_fraction_B0Bar
    right_tag_fraction = 1 - 2 * wrong_tag_fraction
    wrong_eff_B0 = right_tag_fraction_B0 * right_tag_fraction_B0
    wrong_eff_B0Bar = right_tag_fraction_B0Bar * right_tag_fraction_B0Bar
    wrong_eff = right_tag_fraction * right_tag_fraction

    print('* wrong_tag_fraction for all: ' +
          '{:.3f}'.format(wrong_tag_fraction * 100) +
          ' % *')
    print('* right_tag_fraction for all: ' +
          '{:.3f}'.format(right_tag_fraction * 100) +
          ' % *')
    print('* wrong calculated eff all: ' + '{:.3f}'.format(wrong_eff * 100) +
          ' % *')
    print('* *')
    print('****************************************************************************************************')
    print('')
    print('Table For B2TIP')
    print('')
    # Write out the histograms (kept disabled).
    # histo_avr_r.Write('', ROOT.TObject.kOverwrite)
    # histo_entries_per_bin.Write('', ROOT.TObject.kOverwrite)

    # histo_Cnet_output_B0.Write('', ROOT.TObject.kOverwrite)
    # histo_Cnet_output_B0bar.Write('', ROOT.TObject.kOverwrite)
    # histo_belleplotB0.Write('', ROOT.TObject.kOverwrite)
    # histo_belleplotB0bar.Write('', ROOT.TObject.kOverwrite)
    # histo_calib_B0.Write('', ROOT.TObject.kOverwrite)
    # histo_calib_B0bar.Write('', ROOT.TObject.kOverwrite)

    # Keep a common y-axis maximum across all methods for the qr plots.
    maxB0 = histo_belleplotB0.GetBinContent(histo_belleplotB0.GetMaximumBin())
    maxB0bar = histo_belleplotB0bar.GetBinContent(histo_belleplotB0bar.GetMaximumBin())

    Ymax = max(maxB0, maxB0bar)
    Ymax = Ymax + Ymax / 12

    if YmaxForQrPlot < Ymax:
        YmaxForQrPlot = Ymax

    # Produce the qr-output pdf.
    ROOT.gStyle.SetOptStat(0)
    Canvas1 = ROOT.TCanvas('Bla' + method, 'Final Output', 1200, 800)
    Canvas1.cd()  # activate
    histo_belleplotB0.SetFillColorAlpha(ROOT.kBlue, 0.2)
    histo_belleplotB0.SetFillStyle(1001)
    histo_belleplotB0.GetYaxis().SetLabelSize(0.03)
    histo_belleplotB0.GetYaxis().SetLimits(0, YmaxForQrPlot)
    histo_belleplotB0.GetYaxis().SetTitleOffset(1.2)
    histo_belleplotB0.SetLineColor(ROOT.kBlue)
    histo_belleplotB0bar.SetFillColorAlpha(ROOT.kRed, 1.0)
    histo_belleplotB0bar.SetFillStyle(3005)
    histo_belleplotB0bar.SetLineColor(ROOT.kRed)
    # SetLabelSize etc SetTitle

    histo_belleplotB0.SetTitle('Final Flavor Tagger Output; #it{qr}-output ; Events'
                               )
    histo_belleplotB0.SetMinimum(0)
    histo_belleplotB0.SetMaximum(YmaxForQrPlot)
    histo_belleplotB0.Draw('hist')
    histo_belleplotB0bar.Draw('hist same')

    leg = ROOT.TLegend(0.75, 0.8, 0.9, 0.9)
    leg.AddEntry(histo_belleplotB0, 'true B0')
    leg.AddEntry(histo_belleplotB0bar, 'true B0bar')
    leg.Draw()

    Canvas1.Update()
    # IPython.embed()
    # Quiet suppresses ROOT's "pdf created" info messages while saving.
    with Quiet(ROOT.kError):
        Canvas1.SaveAs(workingDirectory + '/' + 'PIC_Belleplot_both' + method + '.pdf')

    # Produce the calibration-plot pdf.
    Canvas2 = ROOT.TCanvas('Bla2' + method, 'Calibration plot for true B0', 1200, 800)
    Canvas2.cd()  # activate
    histo_calib_B0.SetFillColorAlpha(ROOT.kBlue, 0.2)
    histo_calib_B0.SetFillStyle(1001)
    histo_calib_B0.GetYaxis().SetTitleOffset(1.2)
    histo_calib_B0.SetLineColor(ROOT.kBlue)

    histo_calib_B0.SetTitle('Calibration For True B0; #it{qr}-output ; Calibration '
                            )
    histo_calib_B0.Draw('hist')
    diag.Draw('SAME')
    Canvas2.Update()
    with Quiet(ROOT.kError):
        Canvas2.SaveAs(workingDirectory + '/' + 'PIC_Calibration_B0' + method + '.pdf')

    # Delete the per-method ROOT objects so the next iteration can re-register
    # histograms under the same names.
    histo_avr_r.Delete()
    histo_avr_rB0.Delete()
    histo_avr_rB0bar.Delete()
    histo_ms_r.Delete()
    histo_ms_rB0.Delete()
    histo_ms_rB0bar.Delete()
    histo_mc_NwB0.Delete()
    histo_mc_NwB0bar.Delete()
    histo_notTaggedEvents.Delete()
    histo_entries_per_bin.Delete()
    histo_entries_per_binB0.Delete()
    histo_entries_per_binB0bar.Delete()
    histo_Cnet_output_B0.Delete()
    histo_Cnet_output_B0bar.Delete()
    histo_belleplotB0.Delete()
    histo_belleplotB0bar.Delete()
    histo_calib_B0.Delete()
    histo_calib_B0bar.Delete()
    hallo12.Delete()
    histo_m0.Delete()
    histo_m1.Delete()
    histo_m2.Delete()
    Canvas1.Clear()
    Canvas2.Clear()

    # LaTeX summary table (B2TIP style).
    # NOTE(review): the environment opens as tabularx but closes as tabular —
    # verify against the intended LaTeX template (runtime strings left untouched).
    print(r'\begin{tabularx}{1\textwidth}{@{}r r r r r r r@{}}')
    print(r'\hline')
    print(r'$r$- Interval $\enskip$ & $\varepsilon_i\ $ & $\Delta\varepsilon_i\ $ & $w_i \pm \delta w_i\enskip\; $ ' +
          r' & $\Delta w_i \pm \delta\Delta w_i $& $\varepsilon_{\text{eff}, i} \pm \delta\varepsilon_{\text{eff}, i}\enskip\, $ ' +
          r' & $\Delta \varepsilon_{\text{eff}, i} \pm \delta\Delta \varepsilon_{\text{eff}, i}\enskip\, $\\ \hline\hline')
    for i in range(1, r_size):
        print('$ ' + '{:.3f}'.format(r_subsample[i - 1]) + ' - ' + '{:.3f}'.format(r_subsample[i]) + '$ & $'
              '{: 6.1f}'.format(event_fractionTotal[i] * 100) + r'$ & $' +
              '{: 7.2f}'.format(event_fractionDiff[i] * 100) + r'\;$ & $' +
              '{: 7.2f}'.format(wvalue[i] * 100) + r" \pm " + '{:2.2f}'.format(wvalueUncertainty[i] * 100) + r'\enskip $ & $' +
              '{: 7.2f}'.format(wvalueDiff[i] * 100) + r" \pm " +
              '{:2.2f}'.format(wvalueDiffUncertainty[i] * 100) + r'\enskip $ & $' +
              '{: 8.4f}'.format(iEffEfficiency[i] * 100) +  # + '$ & $' +
              r" \pm " + '{:2.4f}'.format(iEffEfficiencyUncertainty[i] * 100) + r'\, $ & $' +
              '{: 6.4f}'.format(iDeltaEffEfficiency[i] * 100) +  # +
              r" \pm " + '{:2.4f}'.format(iDeltaEffEfficiencyUncertainty[i] * 100) +
              r'\enskip\enskip $ \\ ')
    print(r'\hline\hline')
    print(r'\multicolumn{1}{r}{Total} & & \multicolumn{5}{r}{ $\varepsilon_\text{eff} = ' +
          r'\sum_i \varepsilon_i \cdot \langle 1-2w_i\rangle^2 = ' +
          '{: 6.2f}'.format(average_eff_eff * 100) + r" \pm " + '{: 6.2f}'.format(uncertainty_eff_effAverage * 100) + r'\enskip\, ')
    print(r'\Delta \varepsilon_\text{eff} = ' +
          '{: 6.2f}'.format(diff_eff * 100) + r" \pm " + '{: 6.2f}'.format(diff_eff_Uncertainty * 100) + r'\quad\ $ }' +
          r' \\ ')
    print(r'\hline')
    print(r'\end{tabular}')
    print('')

    print('')
    print('Mu-Values for Table')
    print('')

    # mu-parameter rows for the LaTeX table.
    for i in range(1, r_size):
        print('$ ' + '{:.3f}'.format(r_subsample[i - 1]) + ' - ' + '{:.3f}'.format(r_subsample[i]) + '$ & $'
              '{: 7.2f}'.format(muParam[i] * 100) + r" \pm " + '{:2.2f}'.format(muParamUncertainty[i] * 100) + r' $ & ')
663 # ************************************************
664 # DETERMINATION OF INDIVIDUAL EFFECTIVE EFFICIENCY
665 # ************************************************
666 
667 # keep in mind:
668 # the individual efficiency is determined on basis of the combiner training.
669 # Whereas the efficiency is determined on basis of the final expert output.
670 # print(eventLevelParticleLists)
671 # eventLevelParticleLists.append(('K+:inRoe', 'KaonNotWeighted',
672 # 'QpOf(K+:inRoe, isRightCategory(Kaon), isRightCategory(Kaon))'))
673 # eventLevelParticleLists.append(('Lambda0:inRoe', 'LambdaNotWeighted',
674 # 'weightedQpOf(Lambda0:inRoe, isRightCategory(Lambda), isRightCategory(Lambda))'))
675 # eventLevelParticleLists.append(('e+:inRoe', 'Electron',
676 # 'weightedQpOf(e+:inRoe, isRightCategory(Electron), isRightCategory(Electron))'))
677 # eventLevelParticleLists.append(
678 # ('e+:inRoe',
679 # 'IntermediateElectron',
680 # 'weightedQpOf(e+:inRoe, isRightCategory(IntermediateElectron), isRightCategory(IntermediateElectron))'))
681 # eventLevelParticleLists.append(('mu+:inRoe', 'Muon',
682 # 'weightedQpOf(e+:inRoe, isRightCategory(Muon), isRightCategory(Muon))'))
683 # eventLevelParticleLists.append(('mu+:inRoe', 'IntermediateMuon',
684 # 'weightedQpOf(e+:inRoe, isRightCategory(IntermediateMuon), isRightCategory(IntermediateMuon))'))
685 # eventLevelParticleLists.append(('mu+:inRoe', 'KinLepton',
686 # 'weightedQpOf(e+:inRoe, isRightCategory(KinLepton), isRightCategory(KinLepton))'))
687 # eventLevelParticleLists.append(
688 # ('mu+:inRoe',
689 # 'IntermediateKinLepton',
690 # 'weightedQpOf(e+:inRoe, isRightCategory(IntermediateKinLepton), isRightCategory(IntermediateKinLepton))'))
691 # eventLevelParticleLists.append(('pi+:inRoe', 'SlowPion',
692 # 'weightedQpOf(e+:inRoe, isRightCategory(MaximumPstar), isRightCategory(MaximumPstar))'))
693 # eventLevelParticleLists.append(('pi+:inRoe', 'FSC',
694 # 'weightedQpOf(e+:inRoe, isRightCategory(SlowPion), isRightCategory(SlowPion))'))
695 # eventLevelParticleLists.append(('pi+:inRoe', 'MaximumPstar',
696 # 'weightedQpOf(e+:inRoe, isRightCategory(FSC), isRightCategory(SlowPion))'))
697 # eventLevelParticleLists.append(('pi+:inRoe', 'FastHadron',
698 # 'weightedQpOf(e+:inRoe, isRightCategory(FastHadron), isRightCategory(FastHadron))'))
699 # eventLevelParticleLists.append(('K+:inRoe', 'KaonPion',
700 # 'weightedQpOf(e+:inRoe, isRightCategory(KaonPion), isRightCategory(Kaon))'))
701 
702 
# Show which event-level category lists feed the combiner.
print(ft.eventLevelParticleLists)


header = ('******************************************* MEASURED EFFECTIVE EFFICIENCY FOR INDIVIDUAL CATEGORIES ' +
          '**********************************************')
print(header)
print('* ' +
      ' *')
# input: Classifier input from event-level. Output of event-level is recalculated for input on combiner-level.
# but is re-evaluated under combiner target. Signal is B0, background is B0Bar.
NbinsCategories = 100  # binning of the raw per-category classifier output
# collects one (category, effAverage, dEffAverage, effDiff, dEffDiff) tuple per category
categoriesPerformance = []
for (particleList, category, combinerVariable) in ft.eventLevelParticleLists:
    # ---- raw classifier-output histograms (not yet probabilities) ----
    # both true flavors together
    hist_both = ROOT.TH1F('Both_' + category, 'Input Both (B0) ' +
                          category + ' (binning)', NbinsCategories, -1.0, 1.0)
    # true B0 only
    hist_signal = ROOT.TH1F('Signal_' + category, 'Input Signal (B0) ' +
                            category + ' (binning)', NbinsCategories, -1.0, 1.0)
    # true B0bar only
    hist_background = ROOT.TH1F('Background_' + category, 'Input Background (B0bar) ' +
                                category + ' (binning)', NbinsCategories, -1.0, 1.0)

    # The raw input is not comparable to the network output; it is transformed
    # below into a probability in [0, 1] ...
    hist_probB0 = ROOT.TH1F('Probability' + category,
                            'Transformed to probability (B0) (' + category + ')',
                            NbinsCategories, 0.0, 1.0)
    hist_probB0bar = ROOT.TH1F('ProbabilityB0bar_' + category,
                               'Transformed to probability (B0bar) (' + category + ')',
                               NbinsCategories, 0.0, 1.0)
    # ... and into a qp value in [-1, 1].
    hist_qrB0 = ROOT.TH1F('QR' + category, 'Transformed to qp (B0)(' +
                          category + ')', NbinsCategories, -1.0, 1.0)
    hist_qrB0bar = ROOT.TH1F('QRB0bar_' + category, 'Transformed to qp (B0bar) (' +
                             category + ')', NbinsCategories, -1.0, 1.0)
    # abs(qp) histograms, i.e. entries per r bin (r_subsample edges, 6 bins).
    # NOTE(review): histo_entries_per_bin is never filled or read below;
    # kept only so the set of booked ROOT objects is unchanged.
    histo_entries_per_bin = ROOT.TH1F('entries_per_bin_' + category, 'Abs(qp)(B0) (' + category + ')',
                                      int(r_size - 2), r_subsample)
    histo_entries_per_binB0 = ROOT.TH1F('entries_per_bin' + category, 'Abs(qp)(B0) (' +
                                        category + ')', int(r_size - 2), r_subsample)
    histo_entries_per_binB0bar = ROOT.TH1F('entries_per_binB0bar_' + category,
                                           'Abs(qp) (B0bar) (' + category + ')', int(r_size - 2), r_subsample)

    # After the weighted Fill and the Divide below these hold the average r per r bin ...
    hist_avr_rB0 = ROOT.TH1F('Average_r' + category, 'Average r for B0' +
                             category, int(r_size - 2), r_subsample)
    hist_avr_rB0bar = ROOT.TH1F('Average_rB0bar_' + category, 'Average r for B0bar' +
                                category, int(r_size - 2), r_subsample)
    # ... and the mean squared r per r bin (needed for the std. deviation of r).
    hist_ms_rB0 = ROOT.TH1F('AverageSqrdR' + category, 'Average r sqrd for B0' +
                            category, int(r_size - 2), r_subsample)
    hist_ms_rB0bar = ROOT.TH1F('AverageSqrdRB0bar_' + category, 'Average r sqrd for B0bar' +
                               category, int(r_size - 2), r_subsample)

    # ****** TEST OF CALIBRATION ******
    # histograms for the calibration plot (probability binning, 50 bins)
    hist_all = ROOT.TH1F('All_' + category, 'Input Signal (B0) and Background (B0Bar)' +
                         category + ' (binning 50)', 50, 0.0, 1.0)
    hist_calib_B0 = ROOT.TH1F('Calib_' + category, 'Calibration Plot for true B0' +
                              category + ' (binning 50)', 50, 0.0, 1.0)

    # Fill the input histograms from the ntuple. qrMC == +1 (-1) selects true
    # B0 (B0bar); the NotWeighted categories are stored under extraInfo names.
    if category != "KaonNotWeighted" and category != "LambdaNotWeighted":
        tree.Draw('qp' + category + '>>Both_' + category, 'abs(qrMC) == 1.0')
        tree.Draw('qp' + category + '>>Signal_' + category, 'qrMC == 1.0')
        tree.Draw('qp' + category + '>>Background_' + category, 'qrMC == -1.0')
        tree.Draw('qp' + category + '>>All_' + category, 'qrMC!=0')
        tree.Draw('qp' + category + '>>Calib_' + category, 'qrMC == 1.0')

    elif category == "KaonNotWeighted":
        tree.Draw('extraInfo__boQpOfKaon__bc' + '>>Both_' + category, 'abs(qrMC) == 1.0')
        tree.Draw('extraInfo__boQpOfKaon__bc' + '>>Signal_' + category, 'qrMC == 1.0')
        tree.Draw('extraInfo__boQpOfKaon__bc' + '>>Background_' + category, 'qrMC == -1.0')
        tree.Draw('extraInfo__boQpOfKaon__bc' + '>>All_' + category, 'qrMC!=0')
        tree.Draw('extraInfo__boQpOfKaon__bc' + '>>Calib_' + category, 'qrMC == 1.0')

    elif category == "LambdaNotWeighted":
        tree.Draw('extraInfo__boQpOfLambda__bc' + '>>Both_' + category, 'abs(qrMC) == 1.0')
        tree.Draw('extraInfo__boQpOfLambda__bc' + '>>Signal_' + category, 'qrMC == 1.0')
        tree.Draw('extraInfo__boQpOfLambda__bc' + '>>Background_' + category, 'qrMC == -1.0')
        tree.Draw('extraInfo__boQpOfLambda__bc' + '>>All_' + category, 'qrMC!=0')
        tree.Draw('extraInfo__boQpOfLambda__bc' + '>>Calib_' + category, 'qrMC == 1.0')

    # calibration = fraction of true B0 per output bin
    hist_calib_B0.Divide(hist_all)

    # ****** produce the input plots from combiner level ******
    maxSignal = hist_signal.GetBinContent(hist_signal.GetMaximumBin())
    maxBackground = hist_background.GetBinContent(hist_background.GetMaximumBin())

    Ymax = max(maxSignal, maxBackground)
    Ymax = Ymax + Ymax / 12  # headroom above the highest bin

    ROOT.gStyle.SetOptStat(0)
    with Quiet(ROOT.kError):
        Canvas = ROOT.TCanvas('Bla', 'TITEL BLA', 1200, 800)
    Canvas.cd()  # activate
    Canvas.SetLogy()
    hist_signal.SetFillColorAlpha(ROOT.kBlue, 0.2)
    hist_signal.SetFillStyle(1001)
    hist_signal.SetTitleSize(0.1)
    hist_signal.GetXaxis().SetLabelSize(0.04)
    hist_signal.GetYaxis().SetLabelSize(0.04)
    hist_signal.GetXaxis().SetTitleSize(0.05)
    hist_signal.GetYaxis().SetTitleSize(0.05)
    hist_signal.GetXaxis().SetTitleOffset(0.95)
    hist_signal.GetYaxis().SetTitleOffset(1.1)
    hist_signal.GetYaxis().SetLimits(0, Ymax)
    hist_signal.SetLineColor(ROOT.kBlue)
    hist_background.SetFillColorAlpha(ROOT.kRed, 1.0)
    hist_background.SetFillStyle(3005)
    hist_background.GetYaxis().SetLimits(0, Ymax)
    hist_background.SetLineColor(ROOT.kRed)

    hist_signal.SetTitle(category + ' category; #it{qp}-Output ; Events')
    hist_signal.SetMaximum(Ymax)
    hist_background.SetMaximum(Ymax)

    hist_signal.Draw('hist')
    hist_background.Draw('hist same')

    # special legend position for MaximumPstar, default elsewhere
    if category == 'MaximumPstar':
        legend = ROOT.TLegend(0.4, 0.75, 0.6, 0.9)
    else:
        legend = ROOT.TLegend(0.6, 0.75, 0.8, 0.9)
    legend.AddEntry(hist_signal, 'true B0')
    legend.AddEntry(hist_background, 'true B0bar')
    legend.SetTextSize(0.05)
    legend.Draw()

    Canvas.Update()
    with Quiet(ROOT.kError):
        Canvas.SaveAs(workingDirectory + '/' + 'PIC_' + category + '_Input_Combiner.pdf')

    # ***** TEST OF CALIBRATION ******

    # per-classifier-bin arrays; index 0 stays unused (ROOT bins are 1-based)
    binCounter = int(NbinsCategories + 1)
    dilutionB02 = array('d', [0] * binCounter)
    dilutionB0bar2 = array('d', [0] * binCounter)
    purityB0 = array('d', [0] * binCounter)
    purityB0bar = array('d', [0] * binCounter)
    signal = array('d', [0] * binCounter)
    back = array('d', [0] * binCounter)
    weight = array('d', [0] * binCounter)

    for i in range(1, binCounter):
        # transform each classifier bin into purity and dilution (qp)
        signal[i] = hist_signal.GetBinContent(i)
        back[i] = hist_background.GetBinContent(i)
        weight[i] = signal[i] + back[i]

        if signal[i] + back[i] == 0:
            # empty bin: avoid dividing by zero
            purityB0[i] = 0
            dilutionB02[i] = 0
            purityB0bar[i] = 0
            dilutionB0bar2[i] = 0
        else:
            # purity p = s / (s + b); dilution qp = 2p - 1
            purityB0[i] = signal[i] / (signal[i] + back[i])
            dilutionB02[i] = -1 + 2 * signal[i] / (signal[i] + back[i])

            purityB0bar[i] = back[i] / (signal[i] + back[i])
            dilutionB0bar2[i] = -1 + 2 * back[i] / (signal[i] + back[i])

        # probability histograms from 0 to 1, weighted with the bin content
        hist_probB0.Fill(purityB0[i], signal[i])
        hist_probB0bar.Fill(purityB0bar[i], back[i])

        # qp histograms from -1 to 1
        hist_qrB0.Fill(dilutionB02[i], signal[i])
        hist_qrB0bar.Fill(dilutionB0bar2[i], back[i])

        # abs(qp), i.e. the r-value entries per r bin
        histo_entries_per_binB0.Fill(abs(dilutionB02[i]), signal[i])
        histo_entries_per_binB0bar.Fill(abs(dilutionB0bar2[i]), back[i])
        # weighted with r itself -> numerator of the average r per bin
        hist_avr_rB0.Fill(abs(dilutionB02[i]), abs(dilutionB02[i]) * signal[i])
        hist_avr_rB0bar.Fill(abs(dilutionB0bar2[i]), abs(dilutionB0bar2[i]) * back[i])
        # weighted with r**2 -> numerator of the mean squared r per bin
        hist_ms_rB0.Fill(abs(dilutionB02[i]), abs(dilutionB02[i] * dilutionB02[i]) * signal[i])
        # FIX: weight was abs(dilutionB0bar2[i] * dilutionB02[i]), mixing the
        # B0 dilution into the B0bar mean-squared-r (cf. the B0 line above)
        hist_ms_rB0bar.Fill(abs(dilutionB0bar2[i]), abs(dilutionB0bar2[i] * dilutionB0bar2[i]) * back[i])

    # normalize: hist_avr_r* now contain the average r per r bin,
    # hist_ms_r* the mean squared r per r bin
    hist_avr_rB0.Divide(histo_entries_per_binB0)
    hist_avr_rB0bar.Divide(histo_entries_per_binB0bar)

    hist_ms_rB0.Divide(histo_entries_per_binB0)
    hist_ms_rB0bar.Divide(histo_entries_per_binB0bar)

    # now calculating the efficiency

    # total number of events per flavor; by convention half of the
    # not-tagged events is assigned to each flavor
    total_entriesB0 = total_notTagged / 2
    total_entriesB0bar = total_notTagged / 2
    for i in range(1, r_size):
        total_entriesB0 = total_entriesB0 + histo_entries_per_binB0.GetBinContent(i)
        total_entriesB0bar = total_entriesB0bar + histo_entries_per_binB0bar.GetBinContent(i)

    # accumulators and per-r-bin arrays
    tot_eff_effB0 = 0
    tot_eff_effB0bar = 0
    uncertainty_eff_effB0 = 0
    uncertainty_eff_effB0bar = 0
    uncertainty_eff_effAverage = 0
    diff_eff_Uncertainty = 0
    event_fractionB0 = array('f', [0] * r_size)
    event_fractionB0bar = array('f', [0] * r_size)
    rvalueB0 = array('f', [0] * r_size)
    rvalueB0bar = array('f', [0] * r_size)
    rvalueStdB0 = array('f', [0] * r_size)
    rvalueStdB0bar = array('f', [0] * r_size)
    entriesBoth = array('f', [0] * r_size)
    entriesB0 = array('f', [0] * r_size)
    entriesB0bar = array('f', [0] * r_size)
    iEffEfficiencyB0Uncertainty = array('f', [0] * r_size)
    iEffEfficiencyB0barUncertainty = array('f', [0] * r_size)
    iDeltaEffEfficiencyUncertainty = array('f', [0] * r_size)

    for i in range(1, r_size):

        entriesB0[i] = histo_entries_per_binB0.GetBinContent(i)
        entriesB0bar[i] = histo_entries_per_binB0bar.GetBinContent(i)
        # FIX: summed after (not before) the per-flavor entries are read, so
        # entriesBoth is no longer identically zero (it is unused downstream,
        # so this does not change any printed result)
        entriesBoth[i] = entriesB0bar[i] + entriesB0[i]
        event_fractionB0[i] = entriesB0[i] / total_entriesB0
        event_fractionB0bar[i] = entriesB0bar[i] / total_entriesB0bar

        rvalueB0[i] = hist_avr_rB0.GetBinContent(i)
        rvalueB0bar[i] = hist_avr_rB0bar.GetBinContent(i)

        rvalueStdB0[i] = 0
        rvalueStdB0bar[i] = 0

        # standard error of the mean r: sqrt(<r^2> - <r>^2) / sqrt(N - 1)
        if entriesB0[i] > 1:
            rvalueStdB0[i] = math.sqrt(abs(hist_ms_rB0.GetBinContent(
                i) - (hist_avr_rB0.GetBinContent(i))**2)) / math.sqrt(entriesB0[i] - 1)

        if entriesB0bar[i] > 1:
            rvalueStdB0bar[i] = math.sqrt(abs(hist_ms_rB0bar.GetBinContent(
                i) - (hist_avr_rB0bar.GetBinContent(i))**2)) / math.sqrt(entriesB0bar[i] - 1)

        # effective efficiency: sum over r bins of event fraction * r^2
        tot_eff_effB0 = tot_eff_effB0 + event_fractionB0[i] * rvalueB0[i] \
            * rvalueB0[i]
        tot_eff_effB0bar = tot_eff_effB0bar + event_fractionB0bar[i] * rvalueB0bar[i] \
            * rvalueB0bar[i]

        # per-bin uncertainty on the effective efficiency, combining the
        # uncertainty on r with the binomial fluctuation of the bin entries
        iEffEfficiencyB0Uncertainty[i] = rvalueB0[i] * \
            math.sqrt((2 * total_entriesB0 * entriesB0[i] * rvalueStdB0[i])**2 +
                      rvalueB0[i]**2 * entriesB0[i] *
                      (total_entriesB0 * (total_entriesB0 - entriesB0[i]) +
                       entriesB0[i] * total_notTagged)) / (total_entriesB0**2)
        iEffEfficiencyB0barUncertainty[i] = rvalueB0bar[i] * \
            math.sqrt((2 * total_entriesB0bar * entriesB0bar[i] * rvalueStdB0bar[i])**2 +
                      rvalueB0bar[i]**2 * entriesB0bar[i] *
                      (total_entriesB0bar * (total_entriesB0bar - entriesB0bar[i]) +
                       entriesB0bar[i] * total_notTagged)) / (total_entriesB0bar**2)

        iDeltaEffEfficiencyUncertainty[i] = math.sqrt(iEffEfficiencyB0Uncertainty[i]**2 + iEffEfficiencyB0barUncertainty[i]**2)

        diff_eff_Uncertainty = diff_eff_Uncertainty + iDeltaEffEfficiencyUncertainty[i]**2

        uncertainty_eff_effB0 = uncertainty_eff_effB0 + iEffEfficiencyB0Uncertainty[i]**2
        uncertainty_eff_effB0bar = uncertainty_eff_effB0bar + iEffEfficiencyB0barUncertainty[i]**2

    effDiff = tot_eff_effB0 - tot_eff_effB0bar
    effAverage = (tot_eff_effB0 + tot_eff_effB0bar) / 2

    uncertainty_eff_effB0 = math.sqrt(uncertainty_eff_effB0)
    uncertainty_eff_effB0bar = math.sqrt(uncertainty_eff_effB0bar)
    diff_eff_Uncertainty = math.sqrt(diff_eff_Uncertainty)
    uncertainty_eff_effAverage = diff_eff_Uncertainty / 2
    print(
        '{:<25}'.format("* " + category) + ' B0-Eff=' +
        '{: 8.2f}'.format(tot_eff_effB0 * 100) + " +-" + '{: 4.2f}'.format(uncertainty_eff_effB0 * 100) +
        ' %' +
        ' B0bar-Eff=' +
        '{: 8.2f}'.format(tot_eff_effB0bar * 100) + " +-" + '{: 4.2f}'.format(uncertainty_eff_effB0bar * 100) +
        ' %' +
        ' EffAverage=' +
        '{: 8.2f}'.format(effAverage * 100) + " +- " + '{:4.2f}'.format(uncertainty_eff_effAverage * 100) + ' %' +
        ' EffDiff=' +
        '{: 8.2f}'.format(effDiff * 100) + " +- " + '{:4.2f}'.format(diff_eff_Uncertainty * 100) + ' % *')

    categoriesPerformance.append((category, effAverage, uncertainty_eff_effAverage, effDiff, diff_eff_Uncertainty))
    with Quiet(ROOT.kError):
        Canvas.Clear()
1018 # if average_eff != 0:
1019  # print '* -------------------------------------------------------------------------'
1020  # print '* ' + '{: > 8.2f}'.format(average_eff * 100) + ' %' \
1021  # + '{:>85}'.format('TOTAL' + ' *')
1022 
# Close the per-category table and emit a LaTeX summary for the B2TIP document.
print('* ' +
      ' *')
print('**************************************************************************************************************************' +
      '************************')
print('')
print('Table For B2TIP')
print('')
print(r'\begin{tabular}{ l r r }')
print(r'\hline')
print(r'Categories & $\varepsilon_\text{eff} \pm \delta\varepsilon_\text{eff} $& ' +
      r'$\Delta\varepsilon_\text{eff} \pm \delta\Delta\varepsilon_\text{eff}$\\ \hline\hline')
# one LaTeX table row per category: average effective efficiency and B0/B0bar asymmetry
for cat, avg, avg_err, diff, diff_err in categoriesPerformance:
    avg_cell = '{: 6.2f}'.format(avg * 100) + r" \pm " + '{:4.2f}'.format(avg_err * 100)
    diff_cell = '{: 6.2f}'.format(diff * 100) + r" \pm " + '{:4.2f}'.format(diff_err * 100)
    row = '{:<23}'.format(cat) + ' & $' + avg_cell + ' $ & $' + diff_cell + r'\ \enskip $ \\'
    print(row)
print(r'\hline')
print(r'\end{tabular}')
B2INFO('qp Output Histograms in pdf format saved at: ' + workingDirectory)
# persist the per-category performance tuples for later comparison scripts
with open("categoriesPerformance.pkl", "wb") as f:
    pickle.dump(categoriesPerformance, f)