# Belle II Software  light-2403-persian
# flavorTaggerEfficiency.py
1 #!/usr/bin/env python3
2 
3 
10 
11 # ************** Flavor Tagging **************
12 # * *
13 # * This script calculates the parameters char- *
14 # * acterizing the performance of the flavor *
15 # * tagger. It produces plots for the qr *
16 # * distribution of the combiners and the dis- *
17 # * tributions of the combiner input variables. *
18 # * *
19 # ***********************************************
20 
21 import ROOT
22 from basf2 import B2INFO
23 import flavorTagger as ft
24 from defaultEvaluationParameters import categories, Quiet, r_subsample, r_size
25 from array import array
26 import pickle
27 import math
28 import glob
29 import sys
30 
# Run ROOT in batch mode: no graphics windows are opened, plots go straight to file.
ROOT.gROOT.SetBatch(True)

# Command line: flavorTaggerEfficiency.py [input_sim_file | 'input_sim_file*'] [treeName]
if len(sys.argv) != 3:
    sys.exit("Must provide 2 arguments: [input_sim_file] or ['input_sim_file*'] with wildcards and [treeName]"
             )
workingFile = sys.argv[1]
# Expand possible wildcards into the concrete list of input files.
workingFiles = glob.glob(str(workingFile))
treeName = str(sys.argv[2])

if len(workingFiles) < 1:
    sys.exit("No file name or file names " + str(workingFile) + " found.")
42 
43 
# Output plots are written next to wherever the script is invoked.
workingDirectory = '.'

#
# *******************************************
# DETERMINATION OF TOTAL EFFECTIVE EFFICIENCY
# *******************************************
#

# combiner methods found in the input tree ('FBDT', 'FANN', 'DNN')
methods = []

# chain all input files under the user-supplied tree name
tree = ROOT.TChain(treeName)

# bin edges around the B0 (511) PDG code for MC-truth flavor histograms
mcstatus = array('d', [-511.5, 0.0, 511.5])
# make every histogram track sum-of-weights errors
ROOT.TH1.SetDefaultSumw2()

for iFile in workingFiles:
    tree.AddFile(iFile)

# collect the branch names so we can detect which combiner outputs exist
totalBranches = []
for branch in tree.GetListOfBranches():
    totalBranches.append(branch.GetName())

if 'FBDT_qrCombined' in totalBranches:
    methods.append("FBDT")

if 'FANN_qrCombined' in totalBranches:
    methods.append("FANN")

if 'DNN_qrCombined' in totalBranches:
    methods.append("DNN")

# keep only the tagging categories that actually have a qp branch in this tree
usedCategories = []
for cat in categories:
    catBranch = 'qp' + cat
    if catBranch in totalBranches:
        usedCategories.append(cat)

# common y-axis maximum for the qr plots, grown across all methods below
YmaxForQrPlot = 0

# number of untagged events; recomputed per method in the loop below
total_notTagged = 0
# ---------------------------------------------------------------------------
# Combiner-level evaluation: one pass per available combiner method.
# For each method the qr output is histogrammed, per-r-bin wrong-tag
# fractions w_i are counted from MC truth, and the total effective tagging
# efficiency (plus its B0/B0bar asymmetry) is printed, followed by a
# LaTeX-formatted summary table and qr/calibration pdf plots.
# ---------------------------------------------------------------------------
for method in methods:
    # Get error with GetBinError(), set error with SetBinError()
    # histogram contains the average r in each of the r bins -> calculation see below
    histo_avr_r = ROOT.TH1F('Average_r', 'Average r in each of the bins (B0 and B0bar)', int(r_size - 2),
                            r_subsample)
    histo_avr_rB0 = ROOT.TH1F('Average_rB0', 'Average r in each of the bins (B0)', int(r_size - 2),
                              r_subsample)
    histo_avr_rB0bar = ROOT.TH1F('Average_rB0bar', 'Average r in each of the bins (B0bar)', int(r_size - 2),
                                 r_subsample)

    # histograms containing the wrong tags calculated by counting wrong tags
    histo_mc_NwB0 = ROOT.TH1F('mc_NwB0', 'Average r in each of the bins (B0)', int(r_size - 2),
                              r_subsample)
    histo_mc_NwB0bar = ROOT.TH1F('mc_NwB0bar', 'Average r in each of the bins (B0bar)', int(r_size - 2),
                                 r_subsample)

    # histograms containing the mean squared of r in each bin -> calculation see below
    histo_ms_r = ROOT.TH1F('MS_r', 'Mean squared average of r in each of the bins (B0 and B0bar)', int(r_size - 2),
                           r_subsample)
    histo_ms_rB0 = ROOT.TH1F('MS_rB0', 'Mean squared average of r in each of the bins (B0)', int(r_size - 2),
                             r_subsample)
    histo_ms_rB0bar = ROOT.TH1F('MS_rB0bar', 'Mean squared average of r in each of the bins (B0bar)', int(r_size - 2),
                                r_subsample)

    # histograms with the number of entries in each r bin
    histo_entries_per_bin = ROOT.TH1F(
        'entries_per_bin',
        'Events binned in r_subsample according to their r-value for B0 and B0bar prob',
        int(r_size - 2),
        r_subsample)
    histo_entries_per_binB0 = ROOT.TH1F('entries_per_binB0', 'Events binned in r_subsample according '
                                        'to their r-value for B0 prob', int(r_size - 2), r_subsample)
    histo_entries_per_binB0bar = ROOT.TH1F('entries_per_binB0bar',
                                           'Events binned in r_subsample according to their r-value '
                                           'for B0bar prob', int(r_size - 2), r_subsample)
    # network output (not qr and not r) for true B0 (signal) - not strictly necessary
    histo_Cnet_output_B0 = ROOT.TH1F('Comb_Net_Output_B0', 'Combiner network output [not equal to r] '
                                     'for true B0 (binning 100)', 100, 0.0, 1.0)
    # network output (not qr and not r) for true B0bar (background) - not strictly necessary
    histo_Cnet_output_B0bar = ROOT.TH1F('Comb_Net_Output_B0bar', 'Combiner network output [not equal to r] '
                                        'for true B0bar (binning 100)', 100, 0.0, 1.0)
    # belle paper plot (qr-tagger output for true B0)
    histo_belleplotB0 = ROOT.TH1F('BellePlot_B0',
                                  'BellePlot for true B0 (binning 50)', 50,
                                  -1.0, 1.0)
    # belle paper plot (qr-tagger output for true B0bar)
    histo_belleplotB0bar = ROOT.TH1F('BellePlot_B0Bar',
                                     'BellePlot for true B0Bar (binning 50)',
                                     50, -1.0, 1.0)

    # untagged events are flagged with qrCombined < -1 (see the Draw cut below)
    histo_notTaggedEvents = ROOT.TH1F('notTaggedEvents',
                                      'Histogram for not tagged events',
                                      1, -3.0, -1.0)

    # calibration plot for B0. If we get a linear line our MC is fine, then the assumption r ~ 1 - 2w is reasonable.
    # Expectation for the B0 calibration plot: qr=0 half B0 and half B0bar, qr = 1 only B0 and qr = -1
    # no B0. Inverse for the B0bar calibration plot.
    histo_calib_B0 = ROOT.TH1F('Calibration_B0', 'CalibrationPlot for true B0', 100, -1.0, 1.0)
    # calibration plot for B0bar
    histo_calib_B0bar = ROOT.TH1F('Calibration_B0Bar',
                                  'CalibrationPlot for true B0Bar', 100, -1.0,
                                  1.0)
    # belle plot with true B0 and B0bars combined (denominator of the calibration plots)
    hallo12 = ROOT.TH1F('BellePlot_NoCut', 'BellePlot_NoCut (binning 100)',
                        100, -1.0, 1.0)

    # first-order polynomial used for the calibration fit
    diag = ROOT.TF1('diag', 'pol1', -1, 1)

    # histograms for the efficiency calculation done the naive ("wrong") way
    histo_m0 = ROOT.TH1F('BellePlot_m0',
                         'BellePlot_m for true B0 (binning 50)', 50, -1.0, 1.0)
    histo_m1 = ROOT.TH1F('BellePlot_m1',
                         'BellePlot_m for true B0 (binning 50)', 50, -1.0, 1.0)
    histo_m2 = ROOT.TH1F('BellePlot_m2',
                         'BellePlot_m for true B0Bar (binning 50)', 50, -1.0,
                         1.0)

    # fill the histograms from the tree

    tree.Draw(method + '_qrCombined>>BellePlot_B0', 'qrMC == 1')
    tree.Draw(method + '_qrCombined>>BellePlot_B0Bar', 'qrMC == -1')
    tree.Draw(method + '_qrCombined>>BellePlot_NoCut', 'abs(qrMC) == 1')

    tree.Draw(method + '_qrCombined>>Calibration_B0', 'qrMC == 1')
    tree.Draw(method + '_qrCombined>>Calibration_B0Bar', 'qrMC == -1')

    tree.Draw(method + '_qrCombined>>notTaggedEvents',
              'abs(qrMC) == 0 && isSignal == 1 && ' +
              method + '_qrCombined < -1')

    # fill histograms for the naive efficiency calculation
    tree.Draw(method + '_qrCombined>>BellePlot_m0',
              'qrMC == 1 && ' + method + '_qrCombined>0')
    tree.Draw(method + '_qrCombined>>BellePlot_m1',
              'qrMC == 1 && ' + method + '_qrCombined<0')
    tree.Draw(method + '_qrCombined>>BellePlot_m2',
              'qrMC == -1 && ' + method + '_qrCombined>0 ')

    # fill abs(qr) into one of the r bins with abs(qr) as weight;
    # separate calculation for B0 and B0bar
    tree.Project('Average_r', 'abs(' + method + '_qrCombined)', 'abs(' + method + '_qrCombined)*(abs(qrMC) == 1)')
    tree.Project('Average_rB0', 'abs(' + method + '_qrCombined)', 'abs(' + method + '_qrCombined)*(qrMC==1)')
    tree.Project('Average_rB0bar', 'abs(' + method + '_qrCombined)', 'abs(' + method + '_qrCombined)*(qrMC==-1)')

    tree.Project('MS_r', 'abs(' + method + '_qrCombined)', '(' + method +
                 '_qrCombined*' + method + '_qrCombined)*(abs(qrMC) == 1)')
    tree.Project('MS_rB0', 'abs(' + method + '_qrCombined)',
                 '(' + method + '_qrCombined*' + method + '_qrCombined)*(qrMC==1)')
    tree.Project('MS_rB0bar', 'abs(' + method + '_qrCombined)',
                 '(' + method + '_qrCombined*' + method + '_qrCombined)*(qrMC==-1)')

    # fill abs(qr) into one of the r bins unweighted -> plain bin counts
    tree.Project('entries_per_bin', 'abs(' + method + '_qrCombined)', 'abs(qrMC) == 1')
    tree.Project('entries_per_binB0', 'abs(' + method + '_qrCombined)', 'qrMC == 1')
    tree.Project('entries_per_binB0bar', 'abs(' + method + '_qrCombined)', 'qrMC == -1')

    # fill histograms with the number of wrong tags per r bin (qr and qrMC disagree in sign)
    tree.Project('mc_NwB0', 'abs(' + method + '_qrCombined)', ' ' + method + '_qrCombined*qrMC < 0 && qrMC == 1')
    tree.Project('mc_NwB0bar', 'abs(' + method + '_qrCombined)', ' ' + method + '_qrCombined*qrMC < 0 && qrMC == -1')

    # produce the average r histograms (weighted sum / number of entries)
    histo_avr_r.Divide(histo_entries_per_bin)
    histo_avr_rB0.Divide(histo_entries_per_binB0)
    histo_avr_rB0bar.Divide(histo_entries_per_binB0bar)

    histo_ms_r.Divide(histo_entries_per_bin)
    histo_ms_rB0.Divide(histo_entries_per_binB0)
    histo_ms_rB0bar.Divide(histo_entries_per_binB0bar)

    # produce the calibration plots
    # Errors ok
    histo_calib_B0.Divide(hallo12)
    histo_calib_B0bar.Divide(hallo12)

    # Fit for calibration plot
    print(' ')
    print('****************************** CALIBRATION CHECK FOR COMBINER USING ' +
          method + ' ***************************************')
    print(' ')
    print('Fit polynomial of first order to the calibration plot. Expected value ~0.5')
    print(' ')
    histo_calib_B0.Fit(diag, 'TEST')
    print(' ')
    print('****************************** MEASURED EFFECTIVE EFFICIENCY FOR COMBINER USING ' +
          method + ' ***********************************')
    print('* ' +
          ' *')
    # get total number of entries
    total_tagged = histo_entries_per_bin.GetEntries()
    total_tagged_B0 = histo_entries_per_binB0.GetEntries()
    total_tagged_B0bar = histo_entries_per_binB0bar.GetEntries()
    total_notTagged = histo_notTaggedEvents.GetEntries()
    total_entries = (total_tagged + total_notTagged)
    # To a good approximation we assume that half of the not tagged B mesons were B0 (B0bar)
    total_entriesB0 = (total_tagged_B0 + total_notTagged / 2)
    total_entriesB0bar = (total_tagged_B0bar + total_notTagged / 2)

    tagging_eff = total_tagged / (total_tagged + total_notTagged)
    # binomial-style error propagation on the tagged/untagged split
    DeltaTagging_eff = math.sqrt(total_tagged * total_notTagged**2 + total_notTagged * total_tagged**2) / (total_entries**2)
    tot_eff_effB0 = 0
    tot_eff_effB0bar = 0
    average_eff_eff = 0
    uncertainty_eff_effB0 = 0
    uncertainty_eff_effB0bar = 0
    uncertainty_eff_effAverage = 0
    diff_eff_Uncertainty = 0
    # per-bin working arrays; index 0 is unused, bins run from 1 to r_size - 1
    event_fractionB0 = array('f', [0] * r_size)
    event_fractionB0bar = array('f', [0] * r_size)
    event_fractionTotal = array('f', [0] * r_size)
    event_fractionTotalUncertainty = array('f', [0] * r_size)
    eventsInBin_B0 = array('f', [0] * r_size)
    eventsInBin_B0bar = array('f', [0] * r_size)
    eventsInBin_Total = array('f', [0] * r_size)
    event_fractionDiff = array('f', [0] * r_size)
    event_fractionDiffUncertainty = array('f', [0] * r_size)
    rvalueB0 = array('f', [0] * r_size)
    rvalueB0bar = array('f', [0] * r_size)
    rvalueB0Average = array('f', [0] * r_size)
    rvalueStdB0 = array('f', [0] * r_size)
    rvalueStdB0bar = array('f', [0] * r_size)
    rvalueStdB0Average = array('f', [0] * r_size)
    wvalue = array('f', [0] * r_size)
    wvalueUncertainty = array('f', [0] * r_size)
    wvalueB0 = array('f', [0] * r_size)
    wvalueB0bar = array('f', [0] * r_size)
    wvalueB0Uncertainty = array('f', [0] * r_size)
    wvalueB0barUncertainty = array('f', [0] * r_size)
    wvalueDiff = array('f', [0] * r_size)
    wvalueDiffUncertainty = array('f', [0] * r_size)
    entries = array('f', [0] * r_size)
    entriesB0 = array('f', [0] * r_size)
    entriesB0bar = array('f', [0] * r_size)
    iEffEfficiency = array('f', [0] * r_size)
    iEffEfficiencyUncertainty = array('f', [0] * r_size)
    iEffEfficiencyB0Uncertainty = array('f', [0] * r_size)
    iEffEfficiencyB0barUncertainty = array('f', [0] * r_size)
    iEffEfficiencyB0UncertaintyFromOutput = array('f', [0] * r_size)
    iEffEfficiencyB0barUncertaintyFromOutput = array('f', [0] * r_size)

    iDeltaEffEfficiency = array('f', [0] * r_size)
    iDeltaEffEfficiencyUncertainty = array('f', [0] * r_size)
    muParam = array('f', [0] * r_size)
    muParamUncertainty = array('f', [0] * r_size)

    print('* --> DETERMINATION BASED ON MONTE CARLO ' +
          ' *')
    print('* ' +
          ' *')
    print('* Note: mu = Delta_Effcy/(2*Efficiency). Needed for CP analysis ' +
          'together with w and Delta_w *')
    print('* ' +
          ' *')
    print('* ------------------------------------------------------------------' +
          '-------------------------------------------------- *')
    print('* r-interval <r> Efficiency Delta_Effcy ' +
          ' mu w Delta_w *')
    print('* ------------------------------------------------------------------' +
          '-------------------------------------------------- *')
    performance = []
    for i in range(1, r_size):
        # number of entries in this r bin
        entries[i] = histo_entries_per_bin.GetBinContent(i)
        entriesB0[i] = histo_entries_per_binB0.GetBinContent(i)
        entriesB0bar[i] = histo_entries_per_binB0bar.GetBinContent(i)

        # fraction of events in this bin w.r.t. all events of that flavor
        event_fractionB0[i] = entriesB0[i] / total_entriesB0
        event_fractionB0bar[i] = entriesB0bar[i] / total_entriesB0bar

        event_fractionTotal[i] = (event_fractionB0[i] + event_fractionB0bar[i]) / 2
        event_fractionDiff[i] = event_fractionB0[i] - event_fractionB0bar[i]

        # binomial uncertainties on the event fractions
        event_fractionDiffUncertainty[i] = math.sqrt(entriesB0[i] *
                                                     (total_entriesB0 -
                                                      entriesB0[i]) /
                                                     total_entriesB0**3 +
                                                     entriesB0bar[i] *
                                                     (total_entriesB0bar -
                                                      entriesB0bar[i]) /
                                                     total_entriesB0bar**3)

        event_fractionTotalUncertainty[i] = event_fractionDiffUncertainty[i] / 2

        # average r per bin and its standard error from the mean-squared histograms
        rvalueB0[i] = histo_avr_rB0.GetBinContent(i)
        rvalueB0bar[i] = histo_avr_rB0bar.GetBinContent(i)
        rvalueB0Average[i] = histo_avr_r.GetBinContent(i)
        rvalueStdB0[i] = math.sqrt(histo_ms_rB0.GetBinContent(
            i) - (histo_avr_rB0.GetBinContent(i))**2) / math.sqrt(entriesB0[i] - 1)
        rvalueStdB0bar[i] = math.sqrt(histo_ms_rB0bar.GetBinContent(
            i) - (histo_avr_rB0bar.GetBinContent(i))**2) / math.sqrt(entriesB0bar[i] - 1)
        rvalueStdB0Average[i] = math.sqrt(rvalueStdB0[i]**2 + rvalueStdB0bar[i]**2) / 2

        # calculate the wrong tag fraction from MC-truth counts (only true if MC data good)
        wvalueB0[i] = histo_mc_NwB0.GetBinContent(i) / entriesB0[i]
        wvalueB0bar[i] = histo_mc_NwB0bar.GetBinContent(i) / entriesB0bar[i]
        wvalueDiff[i] = wvalueB0[i] - wvalueB0bar[i]
        wvalueB0Uncertainty[i] = math.sqrt(histo_mc_NwB0.GetBinContent(
            i) * (entriesB0[i] - histo_mc_NwB0.GetBinContent(i)) / (entriesB0[i]**3))
        wvalueB0barUncertainty[i] = math.sqrt(histo_mc_NwB0bar.GetBinContent(
            i) * (entriesB0bar[i] - histo_mc_NwB0bar.GetBinContent(i)) / (entriesB0bar[i]**3))
        wvalueDiffUncertainty[i] = math.sqrt(wvalueB0Uncertainty[i]**2 + wvalueB0barUncertainty[i]**2)
        wvalue[i] = (wvalueB0[i] + wvalueB0bar[i]) / 2
        wvalueUncertainty[i] = wvalueDiffUncertainty[i] / 2

        # per-bin effective efficiency: fraction * (1 - 2w)^2
        iEffEfficiency[i] = event_fractionTotal[i] * (1 - 2 * wvalue[i])**2

        iEffEfficiencyUncertainty[i] = (1 - 2 * wvalue[i]) * \
            math.sqrt((2 * event_fractionTotal[i] * 2 * wvalueUncertainty[i])**2 +
                      (1 - 2 * wvalue[i])**2 * event_fractionTotalUncertainty[i]**2)

        average_eff_eff += iEffEfficiency[i]

        # per-bin effective-efficiency asymmetry between B0 and B0bar
        iDeltaEffEfficiency[i] = event_fractionB0[i] * (1 - 2 * wvalueB0[i])**2 - \
            event_fractionB0bar[i] * (1 - 2 * wvalueB0bar[i])**2

        iEffEfficiencyB0Uncertainty[i] = (1 - 2 * wvalueB0[i]) * \
            math.sqrt((2 * total_entriesB0 * entriesB0[i] * 2 * wvalueB0Uncertainty[i])**2 +
                      (1 - 2 * wvalueB0[i])**2 * entriesB0[i] *
                      total_entriesB0 * (total_entriesB0 - entriesB0[i])) / (total_entriesB0**2)
        iEffEfficiencyB0barUncertainty[i] = (1 - 2 * wvalueB0bar[i]) * \
            math.sqrt((2 * total_entriesB0bar * entriesB0bar[i] * 2 * wvalueB0barUncertainty[i])**2 +
                      (1 - 2 * wvalueB0bar[i])**2 * entriesB0bar[i] *
                      total_entriesB0bar * (total_entriesB0bar - entriesB0bar[i])) / (total_entriesB0bar**2)

        iDeltaEffEfficiencyUncertainty[i] = math.sqrt(iEffEfficiencyB0Uncertainty[i]**2 + iEffEfficiencyB0barUncertainty[i]**2)

        diff_eff_Uncertainty = diff_eff_Uncertainty + iDeltaEffEfficiencyUncertainty[i]**2

        # finally accumulating the total effective efficiency
        tot_eff_effB0 = tot_eff_effB0 + event_fractionB0[i] * (1 - 2 * wvalueB0[i])**2
        tot_eff_effB0bar = tot_eff_effB0bar + event_fractionB0bar[i] * (1 - 2 * wvalueB0bar[i])**2
        uncertainty_eff_effAverage = uncertainty_eff_effAverage + iEffEfficiencyUncertainty[i]**2
        uncertainty_eff_effB0 = uncertainty_eff_effB0 + iEffEfficiencyB0Uncertainty[i]**2
        uncertainty_eff_effB0bar = uncertainty_eff_effB0bar + iEffEfficiencyB0barUncertainty[i]**2
        muParam[i] = event_fractionDiff[i] / (2 * event_fractionTotal[i])
        muParamUncertainty[i] = event_fractionDiffUncertainty[i] / (2 * event_fractionTotal[i]) * math.sqrt(muParam[i]**2 + 1)

        print('* ' + f'{r_subsample[i - 1]:.3f}' + ' - ' + f'{r_subsample[i]:.3f}' + ' ' +
              f'{rvalueB0Average[i]:.3f}' + ' +- ' + f'{rvalueStdB0Average[i]:.4f}' + ' ' +
              f'{event_fractionTotal[i]:.4f}' + ' ' +
              f'{event_fractionDiff[i]: .4f}' + ' +- ' + f'{event_fractionDiffUncertainty[i]:.4f}' + ' ' +
              f'{muParam[i]: .4f}' + ' +- ' + f'{muParamUncertainty[i]:.4f}' + ' ' +
              f'{wvalue[i]:.4f}' + ' +- ' + f'{wvalueUncertainty[i]:.4f}' + ' ' +
              f'{wvalueDiff[i]: .4f}' + ' +- ' + f'{wvalueDiffUncertainty[i]:.4f}' + ' *')

    # combine the quadratically accumulated per-bin uncertainties
    uncertainty_eff_effAverage = math.sqrt(uncertainty_eff_effAverage)
    uncertainty_eff_effB0 = math.sqrt(uncertainty_eff_effB0)
    uncertainty_eff_effB0bar = math.sqrt(uncertainty_eff_effB0bar)
    diff_eff = tot_eff_effB0 - tot_eff_effB0bar
    diff_eff_Uncertainty = math.sqrt(diff_eff_Uncertainty)
    print('* --------------------------------------------------------------------------------------------------' +
          '------------------ *')
    print('* *')
    print('* __________________________________________________________________________________________ *')
    print('* | | *')
    print('* | TOTAL NUMBER OF TAGGED EVENTS = ' +
          f"{f'{total_tagged:.0f}':<24}" + f"{'| *':>36}")
    print('* | | *')
    print(
        '* | TOTAL AVERAGE EFFICIENCY (q=+-1)= ' +
        f'{tagging_eff * 100:.2f}' +
        " +- " +
        f'{DeltaTagging_eff * 100:.2f}' +
        ' % | *')
    print('* | | *')
    print(
        '* | TOTAL AVERAGE EFFECTIVE EFFICIENCY (q=+-1)= ' +
        f'{average_eff_eff * 100:.6f}' +
        " +- " +
        f'{uncertainty_eff_effAverage * 100:.6f}' +
        ' % | *')
    print('* | | *')
    print(
        '* | TOTAL AVERAGE EFFECTIVE EFFICIENCY ASYMMETRY (q=+-1)= ' +
        f'{diff_eff * 100:^9.6f}' +
        " +- " +
        f'{diff_eff_Uncertainty * 100:.6f}' +
        ' % | *')
    print('* | | *')
    print('* | B0-TAGGER TOTAL EFFECTIVE EFFICIENCIES: ' +
          f'{tot_eff_effB0 * 100:.2f}' + " +-" + f'{uncertainty_eff_effB0 * 100: 4.2f}' +
          ' % (q=+1) ' +
          f'{tot_eff_effB0bar * 100:.2f}' + " +-" + f'{uncertainty_eff_effB0bar * 100: 4.2f}' +
          ' % (q=-1) ' + ' | *')
    print('* | | *')
    print('* | FLAVOR PERCENTAGE (MC): ' +
          f'{total_tagged_B0 / total_tagged * 100:.2f}' + ' % (q=+1) ' +
          f'{total_tagged_B0bar / total_tagged * 100:.2f}' + ' % (q=-1) Diff=' +
          f'{(total_tagged_B0 - total_tagged_B0bar) / total_tagged * 100:^5.2f}' + ' % | *')
    print('* |__________________________________________________________________________________________| *')
    print('* *')
    print('****************************************************************************************************')
    print('* *')

    # naive efficiency determination (pure sign counting) - not that important
    print('* --------------------------------- *')
    print('* Efficiency Determination - easiest way *')
    print('* --------------------------------- *')
    total_tagged_B0 = histo_belleplotB0.GetEntries()
    total_tagged_B0Bar = histo_belleplotB0bar.GetEntries()
    total_tagged_wrong = histo_m1.GetEntries()
    total_tagged_B0Bar_wrong = histo_m2.GetEntries()
    total_tagged = total_tagged_B0 + total_tagged_B0Bar
    total_tagged_wrong = total_tagged_wrong + total_tagged_B0Bar_wrong

    wrong_tag_fraction_B0 = total_tagged_wrong / total_tagged_B0
    wrong_tag_fraction_B0Bar = total_tagged_B0Bar_wrong / total_tagged_B0Bar
    wrong_tag_fraction = total_tagged_wrong / total_tagged
    right_tag_fraction_B0 = 1 - 2 * wrong_tag_fraction_B0
    right_tag_fraction_B0Bar = 1 - 2 * wrong_tag_fraction_B0Bar
    right_tag_fraction = 1 - 2 * wrong_tag_fraction
    wrong_eff_B0 = right_tag_fraction_B0 * right_tag_fraction_B0
    wrong_eff_B0Bar = right_tag_fraction_B0Bar * right_tag_fraction_B0Bar
    wrong_eff = right_tag_fraction * right_tag_fraction

    print('* wrong_tag_fraction for all: ' +
          f'{wrong_tag_fraction * 100:.3f}' +
          ' % *')
    print('* right_tag_fraction for all: ' +
          f'{right_tag_fraction * 100:.3f}' +
          ' % *')
    print('* wrong calculated eff all: ' + f'{wrong_eff * 100:.3f}' +
          ' % *')
    print('* *')
    print('****************************************************************************************************')
    print('')
    print('Table For B2TIP')
    print('')

    # y-axis range for the combined qr plot: keep the maximum over all methods
    maxB0 = histo_belleplotB0.GetBinContent(histo_belleplotB0.GetMaximumBin())
    maxB0bar = histo_belleplotB0bar.GetBinContent(histo_belleplotB0bar.GetMaximumBin())

    Ymax = max(maxB0, maxB0bar)
    Ymax = Ymax + Ymax / 12

    if YmaxForQrPlot < Ymax:
        YmaxForQrPlot = Ymax

    # produce a pdf with both qr distributions
    ROOT.gStyle.SetOptStat(0)
    Canvas1 = ROOT.TCanvas('Bla' + method, 'Final Output', 1200, 800)
    Canvas1.cd()  # activate
    histo_belleplotB0.SetFillColorAlpha(ROOT.kBlue, 0.2)
    histo_belleplotB0.SetFillStyle(1001)
    histo_belleplotB0.GetYaxis().SetLabelSize(0.03)
    histo_belleplotB0.GetYaxis().SetLimits(0, YmaxForQrPlot)
    histo_belleplotB0.GetYaxis().SetTitleOffset(1.2)
    histo_belleplotB0.SetLineColor(ROOT.kBlue)
    histo_belleplotB0bar.SetFillColorAlpha(ROOT.kRed, 1.0)
    histo_belleplotB0bar.SetFillStyle(3005)
    histo_belleplotB0bar.SetLineColor(ROOT.kRed)

    histo_belleplotB0.SetTitle('Final Flavor Tagger Output; #it{qr}-output ; Events'
                               )
    histo_belleplotB0.SetMinimum(0)
    histo_belleplotB0.SetMaximum(YmaxForQrPlot)
    histo_belleplotB0.Draw('hist')
    histo_belleplotB0bar.Draw('hist same')

    leg = ROOT.TLegend(0.75, 0.8, 0.9, 0.9)
    leg.AddEntry(histo_belleplotB0, 'true B0')
    leg.AddEntry(histo_belleplotB0bar, 'true B0bar')
    leg.Draw()

    Canvas1.Update()
    # suppress ROOT's "pdf created" info chatter
    with Quiet(ROOT.kError):
        Canvas1.SaveAs(workingDirectory + '/' + 'PIC_Belleplot_both' + method + '.pdf')

    # produce the nice calibration plot
    Canvas2 = ROOT.TCanvas('Bla2' + method, 'Calibration plot for true B0', 1200, 800)
    Canvas2.cd()  # activate
    histo_calib_B0.SetFillColorAlpha(ROOT.kBlue, 0.2)
    histo_calib_B0.SetFillStyle(1001)
    histo_calib_B0.GetYaxis().SetTitleOffset(1.2)
    histo_calib_B0.SetLineColor(ROOT.kBlue)

    histo_calib_B0.SetTitle('Calibration For True B0; #it{qr}-output ; Calibration '
                            )
    histo_calib_B0.Draw('hist')
    diag.Draw('SAME')
    Canvas2.Update()
    with Quiet(ROOT.kError):
        Canvas2.SaveAs(workingDirectory + '/' + 'PIC_Calibration_B0' + method + '.pdf')

    # release the per-method ROOT objects before the next iteration
    histo_avr_r.Delete()
    histo_avr_rB0.Delete()
    histo_avr_rB0bar.Delete()
    histo_ms_r.Delete()
    histo_ms_rB0.Delete()
    histo_ms_rB0bar.Delete()
    histo_mc_NwB0.Delete()
    histo_mc_NwB0bar.Delete()
    histo_notTaggedEvents.Delete()
    histo_entries_per_bin.Delete()
    histo_entries_per_binB0.Delete()
    histo_entries_per_binB0bar.Delete()
    histo_Cnet_output_B0.Delete()
    histo_Cnet_output_B0bar.Delete()
    histo_belleplotB0.Delete()
    histo_belleplotB0bar.Delete()
    histo_calib_B0.Delete()
    histo_calib_B0bar.Delete()
    hallo12.Delete()
    histo_m0.Delete()
    histo_m1.Delete()
    histo_m2.Delete()
    Canvas1.Clear()
    Canvas2.Clear()

    # LaTeX-formatted summary table (B2TIP style)
    print(r'\begin{tabularx}{1\textwidth}{@{}r r r r r r r@{}}')
    print(r'\hline')
    print(r'$r$- Interval $\enskip$ & $\varepsilon_i\ $ & $\Delta\varepsilon_i\ $ & $w_i \pm \delta w_i\enskip\; $ ' +
          r' & $\Delta w_i \pm \delta\Delta w_i $& $\varepsilon_{\text{eff}, i} \pm \delta\varepsilon_{\text{eff}, i}\enskip\, $ ' +
          r' & $\Delta \varepsilon_{\text{eff}, i} \pm \delta\Delta \varepsilon_{\text{eff}, i}\enskip\, $\\ \hline\hline')
    for i in range(1, r_size):
        print('$ ' + f'{r_subsample[i - 1]:.3f}' + ' - ' + f'{r_subsample[i]:.3f}' + '$ & $' +
              f'{event_fractionTotal[i] * 100: 6.1f}' + r'$ & $' +
              f'{event_fractionDiff[i] * 100: 7.2f}' + r'\;$ & $' +
              f'{wvalue[i] * 100: 7.2f}' + r" \pm " + f'{wvalueUncertainty[i] * 100:2.2f}' + r'\enskip $ & $' +
              f'{wvalueDiff[i] * 100: 7.2f}' + r" \pm " +
              f'{wvalueDiffUncertainty[i] * 100:2.2f}' + r'\enskip $ & $' +
              f'{iEffEfficiency[i] * 100: 8.4f}' +
              r" \pm " + f'{iEffEfficiencyUncertainty[i] * 100:2.4f}' + r'\, $ & $' +
              f'{iDeltaEffEfficiency[i] * 100: 6.4f}' +
              r" \pm " + f'{iDeltaEffEfficiencyUncertainty[i] * 100:2.4f}' +
              r'\enskip\enskip $ \\ ')
    print(r'\hline\hline')
    print(r'\multicolumn{1}{r}{Total} & & \multicolumn{5}{r}{ $\varepsilon_\text{eff} = ' +
          r'\sum_i \varepsilon_i \cdot \langle 1-2w_i\rangle^2 = ' +
          f'{average_eff_eff * 100: 6.2f}' + r" \pm " + f'{uncertainty_eff_effAverage * 100: 6.2f}' + r'\enskip\, ')
    print(r'\Delta \varepsilon_\text{eff} = ' +
          f'{diff_eff * 100: 6.2f}' + r" \pm " + f'{diff_eff_Uncertainty * 100: 6.2f}' + r'\quad\ $ }' +
          r' \\ ')
    print(r'\hline')
    print(r'\end{tabular}')
    print('')

    print('')
    print('Mu-Values for Table')
    print('')

    for i in range(1, r_size):
        print('$ ' + f'{r_subsample[i - 1]:.3f}' + ' - ' + f'{r_subsample[i]:.3f}' + '$ & $' +
              f'{muParam[i] * 100: 7.2f}' + r" \pm " + f'{muParamUncertainty[i] * 100:2.2f}' + r' $ & ')
# ************************************************
# DETERMINATION OF INDIVIDUAL EFFECTIVE EFFICIENCY
# ************************************************

# keep in mind:
# the individual efficiency is determined on basis of the combiner training,
# whereas the efficiency above is determined on basis of the final expert output.

print(ft.getEventLevelParticleLists(usedCategories))


print('******************************************* MEASURED EFFECTIVE EFFICIENCY FOR INDIVIDUAL CATEGORIES ' +
      '**********************************************')
print('* ' +
      ' *')
# input: classifier input from event-level. Output of event-level is recalculated for input on combiner-level,
# but is re-evaluated under the combiner target. Signal is B0, background is B0Bar.
categoriesPerformance = []
NbinsCategories = 100
666 for category in usedCategories:
667  # histogram of input variable (only signal) - not yet a probability! It's a classifier plot!
668  hist_both = ROOT.TH1F('Both_' + category, 'Input Both (B0) ' +
669  category + ' (binning)', NbinsCategories, -1.0, 1.0)
670  # histogram of input variable (only signal) - not yet a probability! It's a classifier plot!
671  hist_signal = ROOT.TH1F('Signal_' + category, 'Input Signal (B0) ' +
672  category + ' (binning)', NbinsCategories, -1.0, 1.0)
673  # histogram of input variable (only background) - not yet a probability! It's a classifier plot!
674  hist_background = ROOT.TH1F('Background_' + category, 'Input Background (B0bar) ' +
675  category + ' (binning)', NbinsCategories, -1.0, 1.0)
676 
677  # per definition that input is not comparable to the network output, this has to be transformed.
678  # probability output from 0 to 1 (corresponds to net output probability) -> calculation below
679  hist_probB0 = ROOT.TH1F('Probability' + category,
680  'Transformed to probability (B0) (' + category + ')',
681  NbinsCategories, 0.0, 1.0)
682  hist_probB0bar = ROOT.TH1F('ProbabilityB0bar_' + category,
683  'Transformed to probability (B0bar) (' + category + ')',
684  NbinsCategories, 0.0, 1.0)
685  # qp output from -1 to 1 -> transformation below
686  hist_qrB0 = ROOT.TH1F('QR' + category, 'Transformed to qp (B0)(' +
687  category + ')', NbinsCategories, -1.0, 1.0)
688  hist_qrB0bar = ROOT.TH1F('QRB0bar_' + category, 'Transformed to qp (B0bar) (' +
689  category + ')', NbinsCategories, -1.0, 1.0)
690  # histogram for abs(qp), i.e. this histogram contains the r-values -> transformation below
691  # also used to get the number of entries, sorted into 6 bins
692  histo_entries_per_bin = ROOT.TH1F('entries_per_bin_' + category, 'Abs(qp)(B0) (' + category + ')', int(r_size - 2), r_subsample)
693  histo_entries_per_binB0 = ROOT.TH1F('entries_per_bin' + category, 'Abs(qp)(B0) (' +
694  category + ')', int(r_size - 2), r_subsample)
695  histo_entries_per_binB0bar = ROOT.TH1F('entries_per_binB0bar_' + category,
696  'Abs(qp) (B0bar) (' + category + ')', int(r_size - 2), r_subsample)
697 
698  # histogram contains at the end the average r values -> calculation below
699  # sorted into r bins
700  hist_avr_rB0 = ROOT.TH1F('Average_r' + category, 'Average r for B0' +
701  category, int(r_size - 2), r_subsample)
702  hist_avr_rB0bar = ROOT.TH1F('Average_rB0bar_' + category, 'Average r for B0bar' +
703  category, int(r_size - 2), r_subsample)
704 
705  hist_ms_rB0 = ROOT.TH1F('AverageSqrdR' + category, 'Average r sqrd for B0' +
706  category, int(r_size - 2), r_subsample)
707  hist_ms_rB0bar = ROOT.TH1F('AverageSqrdRB0bar_' + category, 'Average r sqrd for B0bar' +
708  category, int(r_size - 2), r_subsample)
709 
710  # ****** TEST OF CALIBRATION ******
711  # for calibration plot we want to have
712  hist_all = ROOT.TH1F('All_' + category, 'Input Signal (B0) and Background (B0Bar)' +
713  category + ' (binning 50)', 50, 0.0, 1.0)
714  hist_calib_B0 = ROOT.TH1F('Calib_' + category, 'Calibration Plot for true B0' +
715  category + ' (binning 50)', 50, 0.0, 1.0)
716 
717  # fill both
718  if category != "KaonNotWeighted" and category != "LambdaNotWeighted":
719  # if category == 'Lambda' or combinerVariable.find('weighted') == -1:
720  tree.Draw('qp' + category + '>>Both_' + category, 'abs(qrMC) == 1.0')
721  # fill signal
722  tree.Draw('qp' + category + '>>Signal_' + category, 'qrMC == 1.0')
723  # fill background
724  tree.Draw('qp' + category + '>>Background_' + category, 'qrMC == -1.0')
725  tree.Draw('qp' + category + '>>All_' + category, 'qrMC!=0')
726  tree.Draw('qp' + category + '>>Calib_' + category, 'qrMC == 1.0')
727 
728  # elif combinerVariable.find('weighted') != -1:
729  # tree.Draw('extraInfo__boWeightedQpOf' + category + '__bc' + '>>Both_' + category, 'abs(qrMC) == 1.0')
730  # tree.Draw('extraInfo__boWeightedQpOf' + category + '__bc' + '>>Signal_' + category, 'qrMC == 1.0')
731  # tree.Draw('extraInfo__boWeightedQpOf' + category + '__bc' + '>>Background_' + category, 'qrMC == -1.0')
732  # tree.Draw('extraInfo__boWeightedQpOf' + category + '__bc' + '>>All_' + category, 'qrMC!=0')
733  # tree.Draw('extraInfo__boWeightedQpOf' + category + '__bc' + '>>Calib_' + category, 'qrMC == 1.0')
734  # category = category + 'W'
735 
736  elif category == "KaonNotWeighted":
737  tree.Draw('extraInfo__boQpOfKaon__bc' + '>>Both_' + category, 'abs(qrMC) == 1.0')
738  tree.Draw('extraInfo__boQpOfKaon__bc' + '>>Signal_' + category, 'qrMC == 1.0')
739  tree.Draw('extraInfo__boQpOfKaon__bc' + '>>Background_' + category, 'qrMC == -1.0')
740  tree.Draw('extraInfo__boQpOfKaon__bc' + '>>All_' + category, 'qrMC!=0')
741  tree.Draw('extraInfo__boQpOfKaon__bc' + '>>Calib_' + category, 'qrMC == 1.0')
742 
743  elif category == "LambdaNotWeighted":
744  tree.Draw('extraInfo__boQpOfLambda__bc' + '>>Both_' + category, 'abs(qrMC) == 1.0')
745  tree.Draw('extraInfo__boQpOfLambda__bc' + '>>Signal_' + category, 'qrMC == 1.0')
746  tree.Draw('extraInfo__boQpOfLambda__bc' + '>>Background_' + category, 'qrMC == -1.0')
747  tree.Draw('extraInfo__boQpOfLambda__bc' + '>>All_' + category, 'qrMC!=0')
748  tree.Draw('extraInfo__boQpOfLambda__bc' + '>>Calib_' + category, 'qrMC == 1.0')
749 
750  hist_calib_B0.Divide(hist_all)
751  # ****** produce the input plots from combiner level ******
752 
753  maxSignal = hist_signal.GetBinContent(hist_signal.GetMaximumBin())
754  maxBackground = hist_background.GetBinContent(hist_background.GetMaximumBin())
755 
756  Ymax = max(maxSignal, maxBackground)
757  Ymax = Ymax + Ymax / 12
758 
759  ROOT.gStyle.SetOptStat(0)
760  with Quiet(ROOT.kError):
761  Canvas = ROOT.TCanvas('Bla', 'TITEL BLA', 1200, 800)
762  Canvas.cd() # activate
763  Canvas.SetLogy()
764  hist_signal.SetFillColorAlpha(ROOT.kBlue, 0.2)
765  hist_signal.SetFillStyle(1001)
766  hist_signal.SetTitleSize(0.1)
767  hist_signal.GetXaxis().SetLabelSize(0.04)
768  hist_signal.GetYaxis().SetLabelSize(0.04)
769  hist_signal.GetXaxis().SetTitleSize(0.05)
770  hist_signal.GetYaxis().SetTitleSize(0.05)
771  hist_signal.GetXaxis().SetTitleOffset(0.95)
772  hist_signal.GetYaxis().SetTitleOffset(1.1)
773  hist_signal.GetYaxis().SetLimits(0, Ymax)
774  hist_signal.SetLineColor(ROOT.kBlue)
775  hist_background.SetFillColorAlpha(ROOT.kRed, 1.0)
776  hist_background.SetFillStyle(3005)
777  hist_background.GetYaxis().SetLimits(0, Ymax)
778  hist_background.SetLineColor(ROOT.kRed)
779 
780  hist_signal.SetTitle(category + ' category; #it{qp}-Output ; Events')
781  # hist_signal.SetMinimum(0)
782  hist_signal.SetMaximum(Ymax)
783  # hist_background.SetMinimum(0)
784  hist_background.SetMaximum(Ymax)
785 
786  hist_signal.Draw('hist')
787  hist_background.Draw('hist same')
788 
789  if category == 'MaximumPstar':
790  legend = ROOT.TLegend(0.4, 0.75, 0.6, 0.9)
791  else:
792  legend = ROOT.TLegend(0.6, 0.75, 0.8, 0.9)
793  legend.AddEntry(hist_signal, 'true B0')
794  legend.AddEntry(hist_background, 'true B0bar')
795  legend.SetTextSize(0.05)
796  legend.Draw()
797 
798  Canvas.Update()
799  with Quiet(ROOT.kError):
800  Canvas.SaveAs(workingDirectory + '/' + 'PIC_' + category + '_Input_Combiner.pdf')
801 
802  # ***** TEST OF CALIBRATION ******
803 
804  # initialize some arrays
805  binCounter = int(NbinsCategories + 1)
806  dilutionB02 = array('d', [0] * binCounter)
807  dilutionB0bar2 = array('d', [0] * binCounter)
808  purityB0 = array('d', [0] * binCounter)
809  purityB0bar = array('d', [0] * binCounter)
810  signal = array('d', [0] * binCounter)
811  back = array('d', [0] * binCounter)
812  weight = array('d', [0] * binCounter)
813 
814  for i in range(1, binCounter):
815  # doing the transformation to probabilities
816  signal[i] = hist_signal.GetBinContent(i)
817  back[i] = hist_background.GetBinContent(i)
818  weight[i] = signal[i] + back[i]
819 
820  # avoid dividing by zero
821  if signal[i] + back[i] == 0:
822  purityB0[i] = 0
823  dilutionB02[i] = 0
824  purityB0bar[i] = 0
825  dilutionB0bar2[i] = 0
826  else:
827 
828  purityB0[i] = signal[i] / (signal[i] + back[i])
829  dilutionB02[i] = -1 + 2 * signal[i] / (signal[i] + back[i])
830 
831  purityB0bar[i] = back[i] / (signal[i] + back[i])
832  dilutionB0bar2[i] = -1 + 2 * back[i] / (signal[i] + back[i])
833 
834  # filling histogram with probability from 0 to 1
835  hist_probB0.Fill(purityB0[i], signal[i])
836  hist_probB0bar.Fill(purityB0bar[i], back[i])
837 
838  # filling histogram with qr from -1 to 1
839  hist_qrB0.Fill(dilutionB02[i], signal[i])
840  hist_qrB0bar.Fill(dilutionB0bar2[i], back[i])
841 
842  # filling histogram with abs(qr), i.e. this histogram contains the r-values (not qp)
843  histo_entries_per_binB0.Fill(abs(dilutionB02[i]), signal[i])
844  histo_entries_per_binB0bar.Fill(abs(dilutionB0bar2[i]), back[i])
845  # filling histogram with abs(qr) special weighted - needed for average r calculation
846  hist_avr_rB0.Fill(abs(dilutionB02[i]), abs(dilutionB02[i]) * signal[i])
847  hist_avr_rB0bar.Fill(abs(dilutionB0bar2[i]), abs(dilutionB0bar2[i]) * back[i])
848  # filling histogram with abs(qr)**2 special weighted - needed for std dev of r calculation
849  hist_ms_rB0.Fill(abs(dilutionB02[i]), abs(dilutionB02[i] * dilutionB02[i]) * signal[i])
850  hist_ms_rB0bar.Fill(abs(dilutionB0bar2[i]), abs(dilutionB0bar2[i] * dilutionB02[i]) * back[i])
851 
852  # hist_avr_rB0bar contains now the average r-value
853  hist_avr_rB0.Divide(histo_entries_per_binB0)
854  hist_avr_rB0bar.Divide(histo_entries_per_binB0bar)
855 
856  hist_ms_rB0.Divide(histo_entries_per_binB0)
857  hist_ms_rB0bar.Divide(histo_entries_per_binB0bar)
858  # now calculating the efficiency
859 
860  # calculating number of events
861  total_entriesB0 = total_notTagged / 2
862  total_entriesB0bar = total_notTagged / 2
863  for i in range(1, r_size):
864  total_entriesB0 = total_entriesB0 + histo_entries_per_binB0.GetBinContent(i)
865  total_entriesB0bar = total_entriesB0bar + histo_entries_per_binB0bar.GetBinContent(i)
866  # initializing some arrays
867  tot_eff_effB0 = 0
868  tot_eff_effB0bar = 0
869  uncertainty_eff_effB0 = 0
870  uncertainty_eff_effB0bar = 0
871  uncertainty_eff_effAverage = 0
872  diff_eff_Uncertainty = 0
873  event_fractionB0 = array('f', [0] * r_size)
874  event_fractionB0bar = array('f', [0] * r_size)
875  rvalueB0 = array('f', [0] * r_size)
876  rvalueB0bar = array('f', [0] * r_size)
877  rvalueStdB0 = array('f', [0] * r_size)
878  rvalueStdB0bar = array('f', [0] * r_size)
879  # wvalue = array('f', [0] * r_size)
880  entriesBoth = array('f', [0] * r_size)
881  entriesB0 = array('f', [0] * r_size)
882  entriesB0bar = array('f', [0] * r_size)
883  iEffEfficiencyB0Uncertainty = array('f', [0] * r_size)
884  iEffEfficiencyB0barUncertainty = array('f', [0] * r_size)
885  iDeltaEffEfficiencyUncertainty = array('f', [0] * r_size)
886 
887  for i in range(1, r_size):
888 
889  entriesBoth[i] = entriesB0bar[i] + entriesB0[i]
890  entriesB0[i] = histo_entries_per_binB0.GetBinContent(i)
891  entriesB0bar[i] = histo_entries_per_binB0bar.GetBinContent(i)
892  event_fractionB0[i] = entriesB0[i] / total_entriesB0
893  event_fractionB0bar[i] = entriesB0bar[i] / total_entriesB0bar
894  # print '* Bin ' + str(i) + ' r-value: ' + str(rvalueB0[i]), 'entriesB0: ' +
895  # str(event_fractionB0[i] * 100) + ' % (' + str(entriesB0[i]) + '/' +
896  # str(total_entriesB0) + ')'
897 
898  rvalueB0[i] = hist_avr_rB0.GetBinContent(i)
899  rvalueB0bar[i] = hist_avr_rB0bar.GetBinContent(i)
900 
901  rvalueStdB0[i] = 0
902  rvalueStdB0bar[i] = 0
903 
904  if entriesB0[i] > 1:
905  rvalueStdB0[i] = math.sqrt(abs(hist_ms_rB0.GetBinContent(
906  i) - (hist_avr_rB0.GetBinContent(i))**2)) / math.sqrt(entriesB0[i] - 1)
907 
908  if entriesB0bar[i] > 1:
909  rvalueStdB0bar[i] = math.sqrt(abs(hist_ms_rB0bar.GetBinContent(
910  i) - (hist_avr_rB0bar.GetBinContent(i))**2)) / math.sqrt(entriesB0bar[i] - 1)
911  # wvalue[i] = (1 - rvalueB0[i]) / 2
912 
913  tot_eff_effB0 = tot_eff_effB0 + event_fractionB0[i] * rvalueB0[i] \
914  * rvalueB0[i]
915  tot_eff_effB0bar = tot_eff_effB0bar + event_fractionB0bar[i] * rvalueB0bar[i] \
916  * rvalueB0bar[i]
917 
918  iEffEfficiencyB0Uncertainty[i] = rvalueB0[i] * \
919  math.sqrt((2 * total_entriesB0 * entriesB0[i] * rvalueStdB0[i])**2 +
920  rvalueB0[i]**2 * entriesB0[i] *
921  (total_entriesB0 * (total_entriesB0 - entriesB0[i]) +
922  entriesB0[i] * total_notTagged)) / (total_entriesB0**2)
923  iEffEfficiencyB0barUncertainty[i] = rvalueB0bar[i] * \
924  math.sqrt((2 * total_entriesB0bar * entriesB0bar[i] * rvalueStdB0bar[i])**2 +
925  rvalueB0bar[i]**2 * entriesB0bar[i] *
926  (total_entriesB0bar * (total_entriesB0bar - entriesB0bar[i]) +
927  entriesB0bar[i] * total_notTagged)) / (total_entriesB0bar**2)
928 
929  iDeltaEffEfficiencyUncertainty[i] = math.sqrt(iEffEfficiencyB0Uncertainty[i]**2 + iEffEfficiencyB0barUncertainty[i]**2)
930 
931  diff_eff_Uncertainty = diff_eff_Uncertainty + iDeltaEffEfficiencyUncertainty[i]**2
932 
933  uncertainty_eff_effB0 = uncertainty_eff_effB0 + iEffEfficiencyB0Uncertainty[i]**2
934  uncertainty_eff_effB0bar = uncertainty_eff_effB0bar + iEffEfficiencyB0barUncertainty[i]**2
935 
936  effDiff = tot_eff_effB0 - tot_eff_effB0bar
937  effAverage = (tot_eff_effB0 + tot_eff_effB0bar) / 2
938 
939  uncertainty_eff_effB0 = math.sqrt(uncertainty_eff_effB0)
940  uncertainty_eff_effB0bar = math.sqrt(uncertainty_eff_effB0bar)
941  diff_eff_Uncertainty = math.sqrt(diff_eff_Uncertainty)
942  uncertainty_eff_effAverage = diff_eff_Uncertainty / 2
943  print(
944  f"{'* ' + category:<25}" + ' B0-Eff=' +
945  f'{tot_eff_effB0 * 100: 8.2f}' + " +-" + f'{uncertainty_eff_effB0 * 100: 4.2f}' +
946  ' %' +
947  ' B0bar-Eff=' +
948  f'{tot_eff_effB0bar * 100: 8.2f}' + " +-" + f'{uncertainty_eff_effB0bar * 100: 4.2f}' +
949  ' %' +
950  ' EffAverage=' +
951  f'{effAverage * 100: 8.2f}' + " +- " + f'{uncertainty_eff_effAverage * 100:4.2f}' + ' %' +
952  ' EffDiff=' +
953  f'{effDiff * 100: 8.2f}' + " +- " + f'{diff_eff_Uncertainty * 100:4.2f}' + ' % *')
954 
955  # hist_signal.Write('', ROOT.TObject.kOverwrite)
956  # hist_background.Write('', ROOT.TObject.kOverwrite)
957  # hist_probB0.Write('', ROOT.TObject.kOverwrite)
958  # hist_probB0bar.Write('', ROOT.TObject.kOverwrite)
959  # hist_qpB0.Write('', ROOT.TObject.kOverwrite)
960  # hist_qpB0bar.Write('', ROOT.TObject.kOverwrite)
961  # hist_absqpB0.Write('', ROOT.TObject.kOverwrite)
962  # hist_absqpB0bar.Write('', ROOT.TObject.kOverwrite)
963  # hist_avr_rB0.Write('', ROOT.TObject.kOverwrite)
964  # hist_avr_rB0bar.Write('', ROOT.TObject.kOverwrite)
965  # hist_all.Write('', ROOT.TObject.kOverwrite)
966  # hist_calib_B0.Write('', ROOT.TObject.kOverwrite)
967  categoriesPerformance.append((category, effAverage, uncertainty_eff_effAverage, effDiff, diff_eff_Uncertainty))
968  with Quiet(ROOT.kError):
969  Canvas.Clear()
970 # if average_eff != 0:
971  # print '* -------------------------------------------------------------------------'
972  # print '* ' + '{: > 8.2f}'.format(average_eff * 100) + ' %' \
973  # + '{:>85}'.format('TOTAL' + ' *')
974 
# closing line of the per-category summary box printed above
print('* ' +
      ' *')
print('**************************************************************************************************************************' +
      '************************')
print('')
print('Table For B2TIP')
print('')
# LaTeX table (B2TIP / Belle II physics book format): one row per category with
# the average effective efficiency and the B0/B0bar efficiency difference,
# each with its uncertainty, all in percent
print(r'\begin{tabular}{ l r r }')
print(r'\hline')
print(r'Categories & $\varepsilon_\text{eff} \pm \delta\varepsilon_\text{eff} $& ' +
      r'$\Delta\varepsilon_\text{eff} \pm \delta\Delta\varepsilon_\text{eff}$\\ \hline\hline')
# categoriesPerformance tuples: (category, effAverage, uncertainty_eff_effAverage,
# effDiff, diff_eff_Uncertainty), filled in the category loop above
for (category, effAverage, uncertainty_eff_effAverage, effDiff, diff_eff_Uncertainty) in categoriesPerformance:
    print(
        f'{category:<23}' +
        ' & $' +
        f'{effAverage * 100: 6.2f}' + r" \pm " + f'{uncertainty_eff_effAverage * 100:4.2f}' +
        ' $ & $' +
        f'{effDiff * 100: 6.2f}' + r" \pm " + f'{diff_eff_Uncertainty * 100:4.2f}' +
        r'\ \enskip $ \\')
print(r'\hline')
print(r'\end{tabular}')
B2INFO('qp Output Histograms in pdf format saved at: ' + workingDirectory)
# persist the per-category results so downstream scripts can compare taggers
# without re-running this analysis
with open("categoriesPerformance.pkl", "wb") as f:
    pickle.dump(categoriesPerformance, f)