Belle II Software  release-08-01-10
flavorTaggerEfficiency.py
1 #!/usr/bin/env python3
2 
3 
10 
11 # ************** Flavor Tagging **************
12 # * *
13 # * This script calculates the parameters char- *
14 # * acterizing the performance of the flavor *
15 # * tagger. It produces plots for the qr *
16 # * distribution of the combiners and the dis- *
17 # * tributions of the combiner input variables. *
18 # * *
19 # ***********************************************
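# Added summary (not part of the original header): besides the tables printed to
# stdout, the script saves the qr and per-category input plots as PDF files in the
# working directory and stores the per-category performance in
# categoriesPerformance.pkl (see the end of the script).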
20 
21 import ROOT
22 from basf2 import B2INFO
23 import flavorTagger as ft
24 from defaultEvaluationParameters import categories, Quiet, r_subsample, r_size
25 from array import array
26 import pickle
27 import math
28 import glob
29 import sys
30 
31 ROOT.gROOT.SetBatch(True)
32 
33 if len(sys.argv) != 3:
34  sys.exit("Must provide 2 arguments: [input_sim_file] or ['input_sim_file*'] with wildcards and [treeName]")
35 
36 workingFile = sys.argv[1]
37 workingFiles = glob.glob(str(workingFile))
38 treeName = str(sys.argv[2])
39 
40 if len(workingFiles) < 1:
41  sys.exit("No file(s) matching " + str(workingFile) + " found.")
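# Example invocation (a sketch; the ntuple name below is a placeholder, while
# 'B0tree' is the tree name quoted further down in this script):
#
#   python3 flavorTaggerEfficiency.py 'myFlavorTaggerNtuple*.root' B0tree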
42 
43 
44 workingDirectory = '.'
45 
46 #
47 # *******************************************
48 # DETERMINATION OF TOTAL EFFECTIVE EFFICIENCY
49 # *******************************************
50 #
51 
52 # working directory
53 # needs the B0bar_final.root-file
54 # treeName = 'B0tree'
55 
56 
57 methods = []
58 
59 tree = ROOT.TChain(treeName)
60 
61 mcstatus = array('d', [-511.5, 0.0, 511.5])
62 ROOT.TH1.SetDefaultSumw2()
63 
64 for iFile in workingFiles:
65  tree.AddFile(iFile)
66 
67 totalBranches = []
68 for branch in tree.GetListOfBranches():
69  totalBranches.append(branch.GetName())
70 
71 if 'FBDT_qrCombined' in totalBranches:
72  methods.append("FBDT")
73 
74 if 'FANN_qrCombined' in totalBranches:
75  methods.append("FANN")
76 
77 if 'DNN_qrCombined' in totalBranches:
78  methods.append("DNN")
79 
80 usedCategories = []
81 for cat in categories:
82  catBranch = 'qp' + cat
83  if catBranch in totalBranches:
84  usedCategories.append(cat)
85 
86 YmaxForQrPlot = 0
87 
88 total_notTagged = 0
89 
90 for method in methods:
91  # Get error with GetBinError(), set error with SetBinError()
92  # histograms containing the average r in each of the r bins -> see calculation below
93  histo_avr_r = ROOT.TH1F('Average_r', 'Average r in each of the bins (B0 and B0bar)', int(r_size - 2),
94  r_subsample)
95  histo_avr_rB0 = ROOT.TH1F('Average_rB0', 'Average r in each of the bins (B0)', int(r_size - 2),
96  r_subsample)
97  histo_avr_rB0bar = ROOT.TH1F('Average_rB0bar', 'Average r in each of the bins (B0bar)', int(r_size - 2),
98  r_subsample)
99 
100  # histograms containing the number of wrongly tagged events per r bin (counted using MC truth)
101  histo_mc_NwB0 = ROOT.TH1F('mc_NwB0', 'Number of wrong tags in each of the bins (B0)', int(r_size - 2),
102  r_subsample)
103  histo_mc_NwB0bar = ROOT.TH1F('mc_NwB0bar', 'Number of wrong tags in each of the bins (B0bar)', int(r_size - 2),
104  r_subsample)
105 
106  # histograms containing the mean squared r in each of the r bins -> see calculation below
107  histo_ms_r = ROOT.TH1F('MS_r', 'Mean squared average of r in each of the bins (B0 and B0bar)', int(r_size - 2),
108  r_subsample)
109  histo_ms_rB0 = ROOT.TH1F('MS_rB0', 'Mean squared average of r in each of the bins (B0)', int(r_size - 2),
110  r_subsample)
111  histo_ms_rB0bar = ROOT.TH1F('MS_rB0bar', 'Mean squared average of r in each of the bins (B0bar)', int(r_size - 2),
112  r_subsample)
113 
114  # histograms with the number of entries per bin
115  histo_entries_per_bin = ROOT.TH1F(
116  'entries_per_bin',
117  'Events binned in r_subsample according to their r-value for B0 and B0bar prob',
118  int(r_size - 2),
119  r_subsample)
120  histo_entries_per_binB0 = ROOT.TH1F('entries_per_binB0', 'Events binned in r_subsample according '
121  'to their r-value for B0 prob', int(r_size - 2), r_subsample)
122  histo_entries_per_binB0bar = ROOT.TH1F('entries_per_binB0bar',
123  'Events binned in r_subsample according to their r-value '
124  'for B0bar prob', int(r_size - 2), r_subsample)
125  # histogram of the combiner network output (neither qr nor r) for true B0 (signal) - not strictly necessary
126  histo_Cnet_output_B0 = ROOT.TH1F('Comb_Net_Output_B0', 'Combiner network output [not equal to r] '
127  'for true B0 (binning 100)', 100, 0.0, 1.0)
128  # histogram of the combiner network output (neither qr nor r) for true B0bar (background) - not strictly necessary
129  histo_Cnet_output_B0bar = ROOT.TH1F('Comb_Net_Output_B0bar', 'Combiner network output [not equal to r] '
130  'for true B0bar (binning 100)', 100, 0.0, 1.0)
131  # histogram containing the belle paper plot (qr-tagger output for true B0)
132  histo_belleplotB0 = ROOT.TH1F('BellePlot_B0',
133  'BellePlot for true B0 (binning 50)', 50,
134  -1.0, 1.0)
135  # histogram containing the belle paper plot (qr-tagger output for true B0bar)
136  histo_belleplotB0bar = ROOT.TH1F('BellePlot_B0Bar',
137  'BellePlot for true B0Bar (binning 50)',
138  50, -1.0, 1.0)
139 
140  histo_notTaggedEvents = ROOT.TH1F('notTaggedEvents',
141  'Histogram for not tagged events',
142  1, -3.0, -1.0)
143 
144  # calibration plot for B0. If we get a linear line our MC is fine, then the assumption r ~ 1 - 2w is reasonable
145  # expectation for the B0 calibration plot: at qr = 0 half B0 and half B0bar, at qr = 1 only B0, at qr = -1 no B0.
146  # The inverse holds for the B0bar calibration plot
147  histo_calib_B0 = ROOT.TH1F('Calibration_B0', 'CalibrationPlot for true B0', 100, -1.0, 1.0)
148  # calibration plot for true B0bar
149  histo_calib_B0bar = ROOT.TH1F('Calibration_B0Bar',
150  'CalibrationPlot for true B0Bar', 100, -1.0,
151  1.0)
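 # Added note (not in the original script): for a perfectly calibrated tagger the
 # fraction of true B0 at a given qr-output is (1 + qr)/2, so the first-order
 # polynomial fitted below should return an intercept and a slope of about 0.5 each;
 # this is the "Expected value ~0.5" quoted in the printout further down.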
152  # belle plot with true B0 and B0bars
153  hallo12 = ROOT.TH1F('BellePlot_NoCut', 'BellePlot_NoCut (binning 100)',
154  100, -1.0, 1.0)
155 
156 
157  diag = ROOT.TF1('diag', 'pol1', -1, 1)
158 
159 
160  # histograms for the naive (incorrect) efficiency calculation
161  histo_m0 = ROOT.TH1F('BellePlot_m0',
162  'BellePlot_m for true B0 (binning 50)', 50, -1.0, 1.0)
163  histo_m1 = ROOT.TH1F('BellePlot_m1',
164  'BellePlot_m for true B0 (binning 50)', 50, -1.0, 1.0)
165  histo_m2 = ROOT.TH1F('BellePlot_m2',
166  'BellePlot_m for true B0Bar (binning 50)', 50, -1.0,
167  1.0)
168 
169  # filling the histograms
170 
171  tree.Draw(method + '_qrCombined>>BellePlot_B0', 'qrMC == 1')
172  tree.Draw(method + '_qrCombined>>BellePlot_B0Bar', 'qrMC == -1')
173  tree.Draw(method + '_qrCombined>>BellePlot_NoCut', 'abs(qrMC) == 1')
174 
175  tree.Draw(method + '_qrCombined>>Calibration_B0', 'qrMC == 1')
176  tree.Draw(method + '_qrCombined>>Calibration_B0Bar', 'qrMC == -1')
177 
178  tree.Draw(method + '_qrCombined>>notTaggedEvents',
179  'abs(qrMC) == 0 && isSignal == 1 && ' +
180  method + '_qrCombined < -1')
181 
182  # filling histograms for the naive (incorrect) efficiency calculation
183  tree.Draw(method + '_qrCombined>>BellePlot_m0',
184  'qrMC == 1 && ' + method + '_qrCombined>0')
185  tree.Draw(method + '_qrCombined>>BellePlot_m1',
186  'qrMC == 1 && ' + method + '_qrCombined<0')
187  tree.Draw(method + '_qrCombined>>BellePlot_m2',
188  'qrMC == -1 && ' + method + '_qrCombined>0 ')
189 
190  # filling with abs(qr) into the r bins, weighted with abs(qr)
191  # separate calculation for B0 and B0bar
192 
193  tree.Project('Average_r', 'abs(' + method + '_qrCombined)', 'abs(' + method + '_qrCombined)*(abs(qrMC) == 1)')
194  tree.Project('Average_rB0', 'abs(' + method + '_qrCombined)', 'abs(' + method + '_qrCombined)*(qrMC==1)')
195  tree.Project('Average_rB0bar', 'abs(' + method + '_qrCombined)', 'abs(' + method + '_qrCombined)*(qrMC==-1)')
196 
197  tree.Project('MS_r', 'abs(' + method + '_qrCombined)', '(' + method +
198  '_qrCombined*' + method + '_qrCombined)*(abs(qrMC) == 1)')
199  tree.Project('MS_rB0', 'abs(' + method + '_qrCombined)',
200  '(' + method + '_qrCombined*' + method + '_qrCombined)*(qrMC==1)')
201  tree.Project('MS_rB0bar', 'abs(' + method + '_qrCombined)',
202  '(' + method + '_qrCombined*' + method + '_qrCombined)*(qrMC==-1)')
203 
204  # filling with abs(qr) into the r bins (unweighted)
205  tree.Project('entries_per_bin', 'abs(' + method + '_qrCombined)', 'abs(qrMC) == 1')
206  tree.Project('entries_per_binB0', 'abs(' + method + '_qrCombined)', 'qrMC == 1')
207  tree.Project('entries_per_binB0bar', 'abs(' + method + '_qrCombined)', 'qrMC == -1')
208 
209  # filling histograms with number of wrong tags per r-bin
210  tree.Project('mc_NwB0', 'abs(' + method + '_qrCombined)', ' ' + method + '_qrCombined*qrMC < 0 && qrMC == 1')
211  tree.Project('mc_NwB0bar', 'abs(' + method + '_qrCombined)', ' ' + method + '_qrCombined*qrMC < 0 && qrMC == -1')
212 
213  # producing the average r histograms
214  histo_avr_r.Divide(histo_entries_per_bin)
215  histo_avr_rB0.Divide(histo_entries_per_binB0)
216  histo_avr_rB0bar.Divide(histo_entries_per_binB0bar)
217 
218  histo_ms_r.Divide(histo_entries_per_bin)
219  histo_ms_rB0.Divide(histo_entries_per_binB0)
220  histo_ms_rB0bar.Divide(histo_entries_per_binB0bar)
221 
222  # producing the calibration plots
223  # Errors ok
224  histo_calib_B0.Divide(hallo12)
225  histo_calib_B0bar.Divide(hallo12)
226 
227  # Fit for calibration plot
228  print(' ')
229  print('****************************** CALIBRATION CHECK FOR COMBINER USING ' +
230  method + ' ***************************************')
231  print(' ')
232  print('Fit a polynomial of first order to the calibration plot. Expected value ~0.5')
233  print(' ')
234  histo_calib_B0.Fit(diag, 'TEST')
235  print(' ')
236  print('****************************** MEASURED EFFECTIVE EFFICIENCY FOR COMBINER USING ' +
237  method + ' ***********************************')
238  print('* ' +
239  ' *')
240  # get total number of entries
241  total_tagged = histo_entries_per_bin.GetEntries()
242  total_tagged_B0 = histo_entries_per_binB0.GetEntries()
243  total_tagged_B0bar = histo_entries_per_binB0bar.GetEntries()
244  total_notTagged = histo_notTaggedEvents.GetEntries()
245  total_entries = (total_tagged + total_notTagged)
246  # To a good approximation we assume that half of the not tagged B mesons were B0 (B0bar)
247  total_entriesB0 = (total_tagged_B0 + total_notTagged / 2)
248  total_entriesB0bar = (total_tagged_B0bar + total_notTagged / 2)
249 
250  tagging_eff = total_tagged / (total_tagged + total_notTagged)
251  DeltaTagging_eff = math.sqrt(total_tagged * total_notTagged**2 + total_notTagged * total_tagged**2) / (total_entries**2)
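 # Added note: the expression above is the usual binomial uncertainty written out,
 #   sqrt(N_tag * N_not**2 + N_not * N_tag**2) / N**2 = sqrt(eps * (1 - eps) / N)
 # with eps = N_tag / N and N = N_tag + N_not.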
252  tot_eff_effB0 = 0
253  tot_eff_effB0bar = 0
254  average_eff_eff = 0
255  uncertainty_eff_effB0 = 0
256  uncertainty_eff_effB0bar = 0
257  uncertainty_eff_effAverage = 0
258  diff_eff_Uncertainty = 0
259  event_fractionB0 = array('f', [0] * r_size)
260  event_fractionB0bar = array('f', [0] * r_size)
261  event_fractionTotal = array('f', [0] * r_size)
262  event_fractionTotalUncertainty = array('f', [0] * r_size)
263  eventsInBin_B0 = array('f', [0] * r_size)
264  eventsInBin_B0bar = array('f', [0] * r_size)
265  eventsInBin_Total = array('f', [0] * r_size)
266  event_fractionDiff = array('f', [0] * r_size)
267  event_fractionDiffUncertainty = array('f', [0] * r_size)
268  rvalueB0 = array('f', [0] * r_size)
269  rvalueB0bar = array('f', [0] * r_size)
270  rvalueB0Average = array('f', [0] * r_size)
271  rvalueStdB0 = array('f', [0] * r_size)
272  rvalueStdB0bar = array('f', [0] * r_size)
273  rvalueStdB0Average = array('f', [0] * r_size)
274  wvalue = array('f', [0] * r_size)
275  wvalueUncertainty = array('f', [0] * r_size)
276  wvalueB0 = array('f', [0] * r_size)
277  wvalueB0bar = array('f', [0] * r_size)
278  wvalueB0Uncertainty = array('f', [0] * r_size)
279  wvalueB0barUncertainty = array('f', [0] * r_size)
280  wvalueDiff = array('f', [0] * r_size)
281  wvalueDiffUncertainty = array('f', [0] * r_size)
282  entries = array('f', [0] * r_size)
283  entriesB0 = array('f', [0] * r_size)
284  entriesB0bar = array('f', [0] * r_size)
285  iEffEfficiency = array('f', [0] * r_size)
286  iEffEfficiencyUncertainty = array('f', [0] * r_size)
287  iEffEfficiencyB0Uncertainty = array('f', [0] * r_size)
288  iEffEfficiencyB0barUncertainty = array('f', [0] * r_size)
289  iEffEfficiencyB0UncertaintyFromOutput = array('f', [0] * r_size)
290  iEffEfficiencyB0barUncertaintyFromOutput = array('f', [0] * r_size)
291 
292  iDeltaEffEfficiency = array('f', [0] * r_size)
293  iDeltaEffEfficiencyUncertainty = array('f', [0] * r_size)
294  muParam = array('f', [0] * r_size)
295  muParamUncertainty = array('f', [0] * r_size)
296  # intervallEff = array('f', [0] * r_size)
297 
298  print('* --> DETERMINATION BASED ON MONTE CARLO ' +
299  ' *')
300  print('* ' +
301  ' *')
302  print('* Note: mu = Delta_Effcy/(2*Efficiency). Needed for CP analysis ' +
303  'together with w and Delta_w *')
304  print('* ' +
305  ' *')
306  print('* ------------------------------------------------------------------' +
307  '-------------------------------------------------- *')
308  print('* r-interval <r> Efficiency Delta_Effcy ' +
309  ' mu w Delta_w *')
310  print('* ------------------------------------------------------------------' +
311  '-------------------------------------------------- *')
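 # Added summary of the per-bin quantities evaluated in the loop below (these
 # formulas restate what the code computes; they are not part of the original comments):
 #   efficiency          eps_i     = (f_i(B0) + f_i(B0bar)) / 2
 #   wrong-tag fraction  w_i       = N_i(wrong) / N_i, averaged over B0 and B0bar (from MC truth)
 #   eff. efficiency     eps_eff,i = eps_i * (1 - 2*w_i)**2
 #   mu_i                = (f_i(B0) - f_i(B0bar)) / (f_i(B0) + f_i(B0bar))
 # The total effective efficiency is the sum of eps_eff,i over the r bins.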
312  performance = []
313  for i in range(1, r_size):
314  # get the average r-value
315  entries[i] = histo_entries_per_bin.GetBinContent(i)
316  entriesB0[i] = histo_entries_per_binB0.GetBinContent(i)
317  entriesB0bar[i] = histo_entries_per_binB0bar.GetBinContent(i)
318  # fraction of events/all events
319 
320  event_fractionB0[i] = entriesB0[i] / total_entriesB0
321  event_fractionB0bar[i] = entriesB0bar[i] / total_entriesB0bar
322 
323  # event_fractionTotal[i] = (entriesB0[i] + entriesB0bar[i]) / total_entries
324  # event_fractionDiff[i] = (entriesB0[i] - entriesB0bar[i]) / total_entries
325 
326  event_fractionTotal[i] = (event_fractionB0[i] + event_fractionB0bar[i]) / 2
327  event_fractionDiff[i] = event_fractionB0[i] - event_fractionB0bar[i]
328 
329  event_fractionDiffUncertainty[i] = math.sqrt(entriesB0[i] *
330  (total_entriesB0 -
331  entriesB0[i]) /
332  total_entriesB0**3 +
333  entriesB0bar[i] *
334  (total_entriesB0bar -
335  entriesB0bar[i]) /
336  total_entriesB0bar**3)
337 
338  event_fractionTotalUncertainty[i] = event_fractionDiffUncertainty[i] / 2
339 
340  rvalueB0[i] = histo_avr_rB0.GetBinContent(i)
341  rvalueB0bar[i] = histo_avr_rB0bar.GetBinContent(i)
342  rvalueB0Average[i] = histo_avr_r.GetBinContent(i) # (rvalueB0[i] + rvalueB0bar[i]) / 2
343  rvalueStdB0[i] = math.sqrt(histo_ms_rB0.GetBinContent(
344  i) - (histo_avr_rB0.GetBinContent(i))**2) / math.sqrt(entriesB0[i] - 1)
345  rvalueStdB0bar[i] = math.sqrt(histo_ms_rB0bar.GetBinContent(
346  i) - (histo_avr_rB0bar.GetBinContent(i))**2) / math.sqrt(entriesB0bar[i] - 1)
347  rvalueStdB0Average[i] = math.sqrt(rvalueStdB0[i]**2 + rvalueStdB0bar[i]**2) / 2
348  # math.sqrt(histo_ms_r.GetBinContent(i) - (histo_avr_r.GetBinContent(i))**2)
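 # Added note: rvalueStdB0 and rvalueStdB0bar above are standard errors of the mean r,
 #   sqrt(<r**2> - <r>**2) / sqrt(N - 1),
 # built from the mean-squared (MS) and average-r histograms filled earlier.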
349  # calculate the wrong-tag fraction (only valid if the MC describes the data well)
350 
351  wvalueB0[i] = histo_mc_NwB0.GetBinContent(i) / entriesB0[i] # (1 - rvalueB0[i]) / 2
352  wvalueB0bar[i] = histo_mc_NwB0bar.GetBinContent(i) / entriesB0bar[i] # (1 - rvalueB0bar[i]) / 2
353  wvalueDiff[i] = wvalueB0[i] - wvalueB0bar[i]
354  wvalueB0Uncertainty[i] = math.sqrt(histo_mc_NwB0.GetBinContent(
355  i) * (entriesB0[i] - histo_mc_NwB0.GetBinContent(i)) / (entriesB0[i]**3))
356  wvalueB0barUncertainty[i] = math.sqrt(histo_mc_NwB0bar.GetBinContent(
357  i) * (entriesB0bar[i] - histo_mc_NwB0bar.GetBinContent(i)) / (entriesB0bar[i]**3))
358  # math.sqrt((rvalueStdB0[i] / 2)**2 + (rvalueStdB0bar[i] / 2)**2)
359  wvalueDiffUncertainty[i] = math.sqrt(wvalueB0Uncertainty[i]**2 + wvalueB0barUncertainty[i]**2)
360  wvalue[i] = (wvalueB0[i] + wvalueB0bar[i]) / 2
361  wvalueUncertainty[i] = wvalueDiffUncertainty[i] / 2
362 
363  # Avr Efficiency
364 
365  # iEffEfficiency[i] = (event_fractionB0[i] * rvalueB0[i] * rvalueB0[i] +
366  # event_fractionB0bar[i] * rvalueB0bar[i] * rvalueB0bar[i]) / 2
367 
368  iEffEfficiency[i] = event_fractionTotal[i] * (1 - 2 * wvalue[i])**2
369 
370  iEffEfficiencyUncertainty[i] = (1 - 2 * wvalue[i]) * \
371  math.sqrt((2 * event_fractionTotal[i] * 2 * wvalueUncertainty[i])**2 +
372  (1 - 2 * wvalue[i])**2 * event_fractionTotalUncertainty[i]**2)
373 
374  # iEffEfficiencyUncertainty[i] = rvalueB0Average[i] * \
375  # math.sqrt((2 * total_entries * entries[i] * rvalueStdB0Average[i])**2 +
376  # rvalueB0Average[i]**2 * entries[i] *
377  # (total_entries * (total_entries - entries[i]) +
378  # entries[i] * total_notTagged)) / (total_entries**2)
379 
380  # iEffEfficiencyB0UncertaintyFromOutput[i] = rvalueB0[i] * \
381  # math.sqrt((2 * total_entriesB0 * entriesB0[i] * rvalueStdB0[i])**2 +
382  # rvalueB0[i]**2 * entriesB0[i] *
383  # total_entriesB0 * (total_entriesB0 - entriesB0[i])) / (total_entriesB0**2)
384  # iEffEfficiencyB0barUncertaintyFromOutput[i] = rvalueB0bar[i] * \
385  # math.sqrt((2 * total_entriesB0bar * entriesB0bar[i] * rvalueStdB0bar[i])**2 +
386  # rvalueB0bar[i]**2 * entriesB0bar[i] *
387  # total_entriesB0bar * (total_entriesB0bar - entriesB0bar[i])) / (total_entriesB0bar**2)
388 
389  # iEffEfficiencyUncertainty[i] =
390  # math.sqrt(iEffEfficiencyB0UncertaintyFromOutput[i]**2 +
391  # iEffEfficiencyB0barUncertaintyFromOutput[i]**2) / 2
392 
393  # iEffEfficiency[i] = (event_fractionB0[i] * (1-2*wvalueB0[i])**2 + event_fractionB0bar[i] * (1-2*wvalueB0bar[i])**2)/2
394 
395  average_eff_eff += iEffEfficiency[i]
396  # average_eff_eff += (event_fractionB0[i] * (1-2*wvalueB0[i])**2 + event_fractionB0bar[i] * (1-2*wvalueB0bar[i])**2)/2
397 
398  # Delta Eff
399 
400  iDeltaEffEfficiency[i] = event_fractionB0[i] * (1 - 2 * wvalueB0[i])**2 - \
401  event_fractionB0bar[i] * (1 - 2 * wvalueB0bar[i])**2
402 
403  iEffEfficiencyB0Uncertainty[i] = (1 - 2 * wvalueB0[i]) * \
404  math.sqrt((2 * total_entriesB0 * entriesB0[i] * 2 * wvalueB0Uncertainty[i])**2 +
405  (1 - 2 * wvalueB0[i])**2 * entriesB0[i] *
406  total_entriesB0 * (total_entriesB0 - entriesB0[i])) / (total_entriesB0**2)
407  iEffEfficiencyB0barUncertainty[i] = (1 - 2 * wvalueB0bar[i]) * \
408  math.sqrt((2 * total_entriesB0bar * entriesB0bar[i] * 2 * wvalueB0barUncertainty[i])**2 +
409  (1 - 2 * wvalueB0bar[i])**2 * entriesB0bar[i] *
410  total_entriesB0bar * (total_entriesB0bar - entriesB0bar[i])) / (total_entriesB0bar**2)
411 
412  iDeltaEffEfficiencyUncertainty[i] = math.sqrt(iEffEfficiencyB0Uncertainty[i]**2 + iEffEfficiencyB0barUncertainty[i]**2)
413  # iEffEfficiencyUncertainty[i] = iDeltaEffEfficiencyUncertainty[i]/2
414 
415  diff_eff_Uncertainty = diff_eff_Uncertainty + iDeltaEffEfficiencyUncertainty[i]**2
416 
417  # finally calculating the total effective efficiency
418  tot_eff_effB0 = tot_eff_effB0 + event_fractionB0[i] * (1 - 2 * wvalueB0[i])**2
419  tot_eff_effB0bar = tot_eff_effB0bar + event_fractionB0bar[i] * (1 - 2 * wvalueB0bar[i])**2
420  uncertainty_eff_effAverage = uncertainty_eff_effAverage + iEffEfficiencyUncertainty[i]**2
421  uncertainty_eff_effB0 = uncertainty_eff_effB0 + iEffEfficiencyB0Uncertainty[i]**2
422  uncertainty_eff_effB0bar = uncertainty_eff_effB0bar + iEffEfficiencyB0barUncertainty[i]**2
423  muParam[i] = event_fractionDiff[i] / (2 * event_fractionTotal[i])
424  muParamUncertainty[i] = event_fractionDiffUncertainty[i] / (2 * event_fractionTotal[i]) * math.sqrt(muParam[i]**2 + 1)
425 
426  # intervallEff[i] = event_fractionTotal[i] * rvalueB0Average[i] * rvalueB0Average[i]
427  print('* ' + '{:.3f}'.format(r_subsample[i - 1]) + ' - ' + '{:.3f}'.format(r_subsample[i]) + ' ' +
428  '{:.3f}'.format(rvalueB0Average[i]) + ' +- ' + '{:.4f}'.format(rvalueStdB0Average[i]) + ' ' +
429  '{:.4f}'.format(event_fractionTotal[i]) + ' ' +
430  '{: .4f}'.format(event_fractionDiff[i]) + ' +- ' + '{:.4f}'.format(event_fractionDiffUncertainty[i]) + ' ' +
431  '{: .4f}'.format(muParam[i]) + ' +- ' + '{:.4f}'.format(muParamUncertainty[i]) + ' ' +
432  '{:.4f}'.format(wvalue[i]) + ' +- ' + '{:.4f}'.format(wvalueUncertainty[i]) + ' ' +
433  '{: .4f}'.format(wvalueDiff[i]) + ' +- ' + '{:.4f}'.format(wvalueDiffUncertainty[i]) + ' *')
434 
435  # average_eff_eff = (tot_eff_effB0 + tot_eff_effB0bar) / 2
436  uncertainty_eff_effAverage = math.sqrt(uncertainty_eff_effAverage)
437  uncertainty_eff_effB0 = math.sqrt(uncertainty_eff_effB0)
438  uncertainty_eff_effB0bar = math.sqrt(uncertainty_eff_effB0bar)
439  diff_eff = tot_eff_effB0 - tot_eff_effB0bar
440  diff_eff_Uncertainty = math.sqrt(diff_eff_Uncertainty)
441  print('* --------------------------------------------------------------------------------------------------' +
442  '------------------ *')
443  print('* *')
444  print('* __________________________________________________________________________________________ *')
445  print('* | | *')
446  print('* | TOTAL NUMBER OF TAGGED EVENTS = ' +
447  '{:<24}'.format("%.0f" % total_tagged) + '{:>36}'.format('| *'))
448  print('* | | *')
449  print(
450  '* | TOTAL AVERAGE EFFICIENCY (q=+-1)= ' +
451  '{:.2f}'.format(
452  tagging_eff *
453  100) +
454  " +- " +
455  '{:.2f}'.format(
456  DeltaTagging_eff *
457  100) +
458  ' % | *')
459  print('* | | *')
460  print(
461  '* | TOTAL AVERAGE EFFECTIVE EFFICIENCY (q=+-1)= ' +
462  '{:.6f}'.format(
463  average_eff_eff *
464  100) +
465  " +- " +
466  '{:.6f}'.format(
467  uncertainty_eff_effAverage *
468  100) +
469  ' % | *')
470  print('* | | *')
471  print(
472  '* | TOTAL AVERAGE EFFECTIVE EFFICIENCY ASYMMETRY (q=+-1)= ' +
473  '{:^9.6f}'.format(
474  diff_eff *
475  100) +
476  " +- " +
477  '{:.6f}'.format(
478  diff_eff_Uncertainty *
479  100) +
480  ' % | *')
481  print('* | | *')
482  print('* | B0-TAGGER TOTAL EFFECTIVE EFFICIENCIES: ' +
483  '{:.2f}'.format(tot_eff_effB0 * 100) + " +-" + '{: 4.2f}'.format(uncertainty_eff_effB0 * 100) +
484  ' % (q=+1) ' +
485  '{:.2f}'.format(tot_eff_effB0bar * 100) + " +-" + '{: 4.2f}'.format(uncertainty_eff_effB0bar * 100) +
486  ' % (q=-1) ' + ' | *')
487  print('* | | *')
488  print('* | FLAVOR PERCENTAGE (MC): ' +
489  '{:.2f}'.format(total_tagged_B0 / total_tagged * 100) + ' % (q=+1) ' +
490  '{:.2f}'.format(total_tagged_B0bar / total_tagged * 100) + ' % (q=-1) Diff=' +
491  '{:^5.2f}'.format((total_tagged_B0 - total_tagged_B0bar) / total_tagged * 100) + ' % | *')
492  print('* |__________________________________________________________________________________________| *')
493  print('* *')
494  print('****************************************************************************************************')
495  print('* *')
496 
497  # not that important
498  print('* --------------------------------- *')
499  print('* Efficiency Determination - easiest way *')
500  print('* --------------------------------- *')
501  total_tagged_B0 = histo_belleplotB0.GetEntries()
502  total_tagged_B0Bar = histo_belleplotB0bar.GetEntries()
503  total_tagged_wrong = histo_m1.GetEntries()
504  total_tagged_B0Bar_wrong = histo_m2.GetEntries()
505  total_tagged = total_tagged_B0 + total_tagged_B0Bar
506  total_tagged_wrong = total_tagged_wrong + total_tagged_B0Bar_wrong
507 
508  wrong_tag_fraction_B0 = total_tagged_wrong / total_tagged_B0
509  wrong_tag_fraction_B0Bar = total_tagged_B0Bar_wrong / total_tagged_B0Bar
510  wrong_tag_fraction = total_tagged_wrong / total_tagged
511  right_tag_fraction_B0 = 1 - 2 * wrong_tag_fraction_B0
512  right_tag_fraction_B0Bar = 1 - 2 * wrong_tag_fraction_B0Bar
513  right_tag_fraction = 1 - 2 * wrong_tag_fraction
514  wrong_eff_B0 = right_tag_fraction_B0 * right_tag_fraction_B0
515  wrong_eff_B0Bar = right_tag_fraction_B0Bar * right_tag_fraction_B0Bar
516  wrong_eff = right_tag_fraction * right_tag_fraction
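 # Added note: this "easiest" estimate uses a single wrong-tag fraction for the whole
 # sample, i.e. eff = (1 - 2*w)**2 without any r binning, which is why it is labelled
 # as the wrong way above; it is printed only for comparison.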
517 
518  print('* wrong_tag_fraction for all: ' +
519  '{:.3f}'.format(wrong_tag_fraction * 100) +
520  ' % *')
521  print('* right_tag_fraction for all: ' +
522  '{:.3f}'.format(right_tag_fraction * 100) +
523  ' % *')
524  print('* wrong calculated eff all: ' + '{:.3f}'.format(wrong_eff * 100) +
525  ' % *')
526  print('* *')
527  print('****************************************************************************************************')
528  print('')
529  print('Table For B2TIP')
530  print('')
531  # write out the histograms
532  # histo_avr_r.Write('', ROOT.TObject.kOverwrite)
533  # histo_entries_per_bin.Write('', ROOT.TObject.kOverwrite)
534 
535  # histo_Cnet_output_B0.Write('', ROOT.TObject.kOverwrite)
536  # histo_Cnet_output_B0bar.Write('', ROOT.TObject.kOverwrite)
537  # histo_belleplotB0.Write('', ROOT.TObject.kOverwrite)
538  # histo_belleplotB0bar.Write('', ROOT.TObject.kOverwrite)
539  # histo_calib_B0.Write('', ROOT.TObject.kOverwrite)
540  # histo_calib_B0bar.Write('', ROOT.TObject.kOverwrite)
541 
542  maxB0 = histo_belleplotB0.GetBinContent(histo_belleplotB0.GetMaximumBin())
543  maxB0bar = histo_belleplotB0bar.GetBinContent(histo_belleplotB0bar.GetMaximumBin())
544 
545  Ymax = max(maxB0, maxB0bar)
546  Ymax = Ymax + Ymax / 12
547 
548  if YmaxForQrPlot < Ymax:
549  YmaxForQrPlot = Ymax
550 
551  # produce a pdf
552  ROOT.gStyle.SetOptStat(0)
553  Canvas1 = ROOT.TCanvas('Bla' + method, 'Final Output', 1200, 800)
554  Canvas1.cd() # activate
555  histo_belleplotB0.SetFillColorAlpha(ROOT.kBlue, 0.2)
556  histo_belleplotB0.SetFillStyle(1001)
557  histo_belleplotB0.GetYaxis().SetLabelSize(0.03)
558  histo_belleplotB0.GetYaxis().SetLimits(0, YmaxForQrPlot)
559  histo_belleplotB0.GetYaxis().SetTitleOffset(1.2)
560  histo_belleplotB0.SetLineColor(ROOT.kBlue)
561  histo_belleplotB0bar.SetFillColorAlpha(ROOT.kRed, 1.0)
562  histo_belleplotB0bar.SetFillStyle(3005)
563  histo_belleplotB0bar.SetLineColor(ROOT.kRed)
564  # SetLabelSize etc SetTitle
565 
566  histo_belleplotB0.SetTitle('Final Flavor Tagger Output; #it{qr}-output ; Events'
567  )
568  histo_belleplotB0.SetMinimum(0)
569  histo_belleplotB0.SetMaximum(YmaxForQrPlot)
570  histo_belleplotB0.Draw('hist')
571  histo_belleplotB0bar.Draw('hist same')
572 
573  leg = ROOT.TLegend(0.75, 0.8, 0.9, 0.9)
574  leg.AddEntry(histo_belleplotB0, 'true B0')
575  leg.AddEntry(histo_belleplotB0bar, 'true B0bar')
576  leg.Draw()
577 
578  Canvas1.Update()
579  # IPython.embed()
580  with Quiet(ROOT.kError):
581  Canvas1.SaveAs(workingDirectory + '/' + 'PIC_Belleplot_both' + method + '.pdf')
582 
583  # produce the nice calibration plot
584  Canvas2 = ROOT.TCanvas('Bla2' + method, 'Calibration plot for true B0', 1200, 800)
585  Canvas2.cd() # activate
586  histo_calib_B0.SetFillColorAlpha(ROOT.kBlue, 0.2)
587  histo_calib_B0.SetFillStyle(1001)
588  histo_calib_B0.GetYaxis().SetTitleOffset(1.2)
589  histo_calib_B0.SetLineColor(ROOT.kBlue)
590 
591  histo_calib_B0.SetTitle('Calibration For True B0; #it{qr}-output ; Calibration '
592  )
593  histo_calib_B0.Draw('hist')
594  diag.Draw('SAME')
595  Canvas2.Update()
596  with Quiet(ROOT.kError):
597  Canvas2.SaveAs(workingDirectory + '/' + 'PIC_Calibration_B0' + method + '.pdf')
598 
599  histo_avr_r.Delete()
600  histo_avr_rB0.Delete()
601  histo_avr_rB0bar.Delete()
602  histo_ms_r.Delete()
603  histo_ms_rB0.Delete()
604  histo_ms_rB0bar.Delete()
605  histo_mc_NwB0.Delete()
606  histo_mc_NwB0bar.Delete()
607  histo_notTaggedEvents.Delete()
608  histo_entries_per_bin.Delete()
609  histo_entries_per_binB0.Delete()
610  histo_entries_per_binB0bar.Delete()
611  histo_Cnet_output_B0.Delete()
612  histo_Cnet_output_B0bar.Delete()
613  histo_belleplotB0.Delete()
614  histo_belleplotB0bar.Delete()
615  histo_calib_B0.Delete()
616  histo_calib_B0bar.Delete()
617  hallo12.Delete()
618  histo_m0.Delete()
619  histo_m1.Delete()
620  histo_m2.Delete()
621  Canvas1.Clear()
622  Canvas2.Clear()
623 
624  print(r'\begin{tabular}{@{}r r r r r r r@{}}')
625  print(r'\hline')
626  print(r'$r$- Interval $\enskip$ & $\varepsilon_i\ $ & $\Delta\varepsilon_i\ $ & $w_i \pm \delta w_i\enskip\; $ ' +
627  r' & $\Delta w_i \pm \delta\Delta w_i $& $\varepsilon_{\text{eff}, i} \pm \delta\varepsilon_{\text{eff}, i}\enskip\, $ ' +
628  r' & $\Delta \varepsilon_{\text{eff}, i} \pm \delta\Delta \varepsilon_{\text{eff}, i}\enskip\, $\\ \hline\hline')
629  for i in range(1, r_size):
630  print('$ ' + '{:.3f}'.format(r_subsample[i - 1]) + ' - ' + '{:.3f}'.format(r_subsample[i]) + '$ & $'
631  '{: 6.1f}'.format(event_fractionTotal[i] * 100) + r'$ & $' +
632  '{: 7.2f}'.format(event_fractionDiff[i] * 100) + r'\;$ & $' +
633  '{: 7.2f}'.format(wvalue[i] * 100) + r" \pm " + '{:2.2f}'.format(wvalueUncertainty[i] * 100) + r'\enskip $ & $' +
634  '{: 7.2f}'.format(wvalueDiff[i] * 100) + r" \pm " +
635  '{:2.2f}'.format(wvalueDiffUncertainty[i] * 100) + r'\enskip $ & $' +
636  '{: 8.4f}'.format(iEffEfficiency[i] * 100) + # + '$ & $' +
637  r" \pm " + '{:2.4f}'.format(iEffEfficiencyUncertainty[i] * 100) + r'\, $ & $' +
638  '{: 6.4f}'.format(iDeltaEffEfficiency[i] * 100) + # +
639  r" \pm " + '{:2.4f}'.format(iDeltaEffEfficiencyUncertainty[i] * 100) +
640  r'\enskip\enskip $ \\ ')
641  print(r'\hline\hline')
642  print(r'\multicolumn{1}{r}{Total} & & \multicolumn{5}{r}{ $\varepsilon_\text{eff} = ' +
643  r'\sum_i \varepsilon_i \cdot \langle 1-2w_i\rangle^2 = ' +
644  '{: 6.2f}'.format(average_eff_eff * 100) + r" \pm " + '{: 6.2f}'.format(uncertainty_eff_effAverage * 100) + r'\enskip\, ')
645  print(r'\Delta \varepsilon_\text{eff} = ' +
646  '{: 6.2f}'.format(diff_eff * 100) + r" \pm " + '{: 6.2f}'.format(diff_eff_Uncertainty * 100) + r'\quad\ $ }' +
647  r' \\ ')
648  print(r'\hline')
649  print(r'\end{tabular}')
650  print('')
651 
652  print('')
653  print('Mu-Values for Table')
654  print('')
655 
656  for i in range(1, r_size):
657  print('$ ' + '{:.3f}'.format(r_subsample[i - 1]) + ' - ' + '{:.3f}'.format(r_subsample[i]) + '$ & $'
658  '{: 7.2f}'.format(muParam[i] * 100) + r" \pm " + '{:2.2f}'.format(muParamUncertainty[i] * 100) + r' $ & ')
659 # ************************************************
660 # DETERMINATION OF INDIVIDUAL EFFECTIVE EFFICIENCY
661 # ************************************************
662 
663 # keep in mind:
664 # the individual (per-category) effective efficiency is determined from the combiner-level input,
665 # whereas the total effective efficiency above is determined from the final expert output.
666 
667 print(ft.getEventLevelParticleLists(usedCategories))
668 
669 
670 print('******************************************* MEASURED EFFECTIVE EFFICIENCY FOR INDIVIDUAL CATEGORIES ' +
671  '**********************************************')
672 print('* ' +
673  ' *')
674 # input: classifier output from the event level, recalculated as input for the combiner level,
675 # but re-evaluated with respect to the combiner target: signal is B0, background is B0Bar.
676 categoriesPerformance = []
677 NbinsCategories = 100
678 for category in usedCategories:
679  # histogram of input variable (signal and background) - not yet a probability! It's a classifier plot!
680  hist_both = ROOT.TH1F('Both_' + category, 'Input Both (B0 and B0bar) ' +
681  category + ' (binning)', NbinsCategories, -1.0, 1.0)
682  # histogram of input variable (only signal) - not yet a probability! It's a classifier plot!
683  hist_signal = ROOT.TH1F('Signal_' + category, 'Input Signal (B0) ' +
684  category + ' (binning)', NbinsCategories, -1.0, 1.0)
685  # histogram of input variable (only background) - not yet a probability! It's a classifier plot!
686  hist_background = ROOT.TH1F('Background_' + category, 'Input Background (B0bar) ' +
687  category + ' (binning)', NbinsCategories, -1.0, 1.0)
688 
689  # by construction this input is not directly comparable to the network output, so it has to be transformed.
690  # probability output from 0 to 1 (corresponds to net output probability) -> calculation below
691  hist_probB0 = ROOT.TH1F('Probability' + category,
692  'Transformed to probability (B0) (' + category + ')',
693  NbinsCategories, 0.0, 1.0)
694  hist_probB0bar = ROOT.TH1F('ProbabilityB0bar_' + category,
695  'Transformed to probability (B0bar) (' + category + ')',
696  NbinsCategories, 0.0, 1.0)
697  # qp output from -1 to 1 -> transformation below
698  hist_qrB0 = ROOT.TH1F('QR' + category, 'Transformed to qp (B0)(' +
699  category + ')', NbinsCategories, -1.0, 1.0)
700  hist_qrB0bar = ROOT.TH1F('QRB0bar_' + category, 'Transformed to qp (B0bar) (' +
701  category + ')', NbinsCategories, -1.0, 1.0)
702  # histograms for abs(qp), i.e. they contain the r-values -> transformation below
703  # also used to get the number of entries, sorted into the r bins
704  histo_entries_per_bin = ROOT.TH1F('entries_per_bin_' + category, 'Abs(qp) (B0 and B0bar) (' + category + ')', int(r_size - 2), r_subsample)
705  histo_entries_per_binB0 = ROOT.TH1F('entries_per_bin' + category, 'Abs(qp)(B0) (' +
706  category + ')', int(r_size - 2), r_subsample)
707  histo_entries_per_binB0bar = ROOT.TH1F('entries_per_binB0bar_' + category,
708  'Abs(qp) (B0bar) (' + category + ')', int(r_size - 2), r_subsample)
709 
710  # histograms that will contain the average r values at the end -> see calculation below
711  # sorted into r bins
712  hist_avr_rB0 = ROOT.TH1F('Average_r' + category, 'Average r for B0' +
713  category, int(r_size - 2), r_subsample)
714  hist_avr_rB0bar = ROOT.TH1F('Average_rB0bar_' + category, 'Average r for B0bar' +
715  category, int(r_size - 2), r_subsample)
716 
717  hist_ms_rB0 = ROOT.TH1F('AverageSqrdR' + category, 'Average r sqrd for B0' +
718  category, int(r_size - 2), r_subsample)
719  hist_ms_rB0bar = ROOT.TH1F('AverageSqrdRB0bar_' + category, 'Average r sqrd for B0bar' +
720  category, int(r_size - 2), r_subsample)
721 
722  # ****** TEST OF CALIBRATION ******
723  # for the calibration plot we need the full qp distribution and the true-B0 subset (divided below)
724  hist_all = ROOT.TH1F('All_' + category, 'Input Signal (B0) and Background (B0Bar)' +
725  category + ' (binning 50)', 50, 0.0, 1.0)
726  hist_calib_B0 = ROOT.TH1F('Calib_' + category, 'Calibration Plot for true B0' +
727  category + ' (binning 50)', 50, 0.0, 1.0)
728 
729  # fill both
730  if category != "KaonNotWeighted" and category != "LambdaNotWeighted":
731  # if category == 'Lambda' or combinerVariable.find('weighted') == -1:
732  tree.Draw('qp' + category + '>>Both_' + category, 'abs(qrMC) == 1.0')
733  # fill signal
734  tree.Draw('qp' + category + '>>Signal_' + category, 'qrMC == 1.0')
735  # fill background
736  tree.Draw('qp' + category + '>>Background_' + category, 'qrMC == -1.0')
737  tree.Draw('qp' + category + '>>All_' + category, 'qrMC!=0')
738  tree.Draw('qp' + category + '>>Calib_' + category, 'qrMC == 1.0')
739 
740  # elif combinerVariable.find('weighted') != -1:
741  # tree.Draw('extraInfo__boWeightedQpOf' + category + '__bc' + '>>Both_' + category, 'abs(qrMC) == 1.0')
742  # tree.Draw('extraInfo__boWeightedQpOf' + category + '__bc' + '>>Signal_' + category, 'qrMC == 1.0')
743  # tree.Draw('extraInfo__boWeightedQpOf' + category + '__bc' + '>>Background_' + category, 'qrMC == -1.0')
744  # tree.Draw('extraInfo__boWeightedQpOf' + category + '__bc' + '>>All_' + category, 'qrMC!=0')
745  # tree.Draw('extraInfo__boWeightedQpOf' + category + '__bc' + '>>Calib_' + category, 'qrMC == 1.0')
746  # category = category + 'W'
747 
748  elif category == "KaonNotWeighted":
749  tree.Draw('extraInfo__boQpOfKaon__bc' + '>>Both_' + category, 'abs(qrMC) == 1.0')
750  tree.Draw('extraInfo__boQpOfKaon__bc' + '>>Signal_' + category, 'qrMC == 1.0')
751  tree.Draw('extraInfo__boQpOfKaon__bc' + '>>Background_' + category, 'qrMC == -1.0')
752  tree.Draw('extraInfo__boQpOfKaon__bc' + '>>All_' + category, 'qrMC!=0')
753  tree.Draw('extraInfo__boQpOfKaon__bc' + '>>Calib_' + category, 'qrMC == 1.0')
754 
755  elif category == "LambdaNotWeighted":
756  tree.Draw('extraInfo__boQpOfLambda__bc' + '>>Both_' + category, 'abs(qrMC) == 1.0')
757  tree.Draw('extraInfo__boQpOfLambda__bc' + '>>Signal_' + category, 'qrMC == 1.0')
758  tree.Draw('extraInfo__boQpOfLambda__bc' + '>>Background_' + category, 'qrMC == -1.0')
759  tree.Draw('extraInfo__boQpOfLambda__bc' + '>>All_' + category, 'qrMC!=0')
760  tree.Draw('extraInfo__boQpOfLambda__bc' + '>>Calib_' + category, 'qrMC == 1.0')
761 
762  hist_calib_B0.Divide(hist_all)
763  # ****** produce the input plots from combiner level ******
764 
765  maxSignal = hist_signal.GetBinContent(hist_signal.GetMaximumBin())
766  maxBackground = hist_background.GetBinContent(hist_background.GetMaximumBin())
767 
768  Ymax = max(maxSignal, maxBackground)
769  Ymax = Ymax + Ymax / 12
770 
771  ROOT.gStyle.SetOptStat(0)
772  with Quiet(ROOT.kError):
773  Canvas = ROOT.TCanvas('Bla', 'TITEL BLA', 1200, 800)
774  Canvas.cd() # activate
775  Canvas.SetLogy()
776  hist_signal.SetFillColorAlpha(ROOT.kBlue, 0.2)
777  hist_signal.SetFillStyle(1001)
778  hist_signal.SetTitleSize(0.1)
779  hist_signal.GetXaxis().SetLabelSize(0.04)
780  hist_signal.GetYaxis().SetLabelSize(0.04)
781  hist_signal.GetXaxis().SetTitleSize(0.05)
782  hist_signal.GetYaxis().SetTitleSize(0.05)
783  hist_signal.GetXaxis().SetTitleOffset(0.95)
784  hist_signal.GetYaxis().SetTitleOffset(1.1)
785  hist_signal.GetYaxis().SetLimits(0, Ymax)
786  hist_signal.SetLineColor(ROOT.kBlue)
787  hist_background.SetFillColorAlpha(ROOT.kRed, 1.0)
788  hist_background.SetFillStyle(3005)
789  hist_background.GetYaxis().SetLimits(0, Ymax)
790  hist_background.SetLineColor(ROOT.kRed)
791 
792  hist_signal.SetTitle(category + ' category; #it{qp}-Output ; Events')
793  # hist_signal.SetMinimum(0)
794  hist_signal.SetMaximum(Ymax)
795  # hist_background.SetMinimum(0)
796  hist_background.SetMaximum(Ymax)
797 
798  hist_signal.Draw('hist')
799  hist_background.Draw('hist same')
800 
801  if category == 'MaximumPstar':
802  legend = ROOT.TLegend(0.4, 0.75, 0.6, 0.9)
803  else:
804  legend = ROOT.TLegend(0.6, 0.75, 0.8, 0.9)
805  legend.AddEntry(hist_signal, 'true B0')
806  legend.AddEntry(hist_background, 'true B0bar')
807  legend.SetTextSize(0.05)
808  legend.Draw()
809 
810  Canvas.Update()
811  with Quiet(ROOT.kError):
812  Canvas.SaveAs(workingDirectory + '/' + 'PIC_' + category + '_Input_Combiner.pdf')
813 
814  # ***** TEST OF CALIBRATION ******
815 
816  # initialize some arrays
817  binCounter = int(NbinsCategories + 1)
818  dilutionB02 = array('d', [0] * binCounter)
819  dilutionB0bar2 = array('d', [0] * binCounter)
820  purityB0 = array('d', [0] * binCounter)
821  purityB0bar = array('d', [0] * binCounter)
822  signal = array('d', [0] * binCounter)
823  back = array('d', [0] * binCounter)
824  weight = array('d', [0] * binCounter)
825 
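 # Added reminder (restating what the loop below does; not part of the original
 # comments): each classifier-output bin is converted into a B0 purity and a dilution,
 #   p_B0 = S / (S + B),   d_B0 = 2*p_B0 - 1,   r = |d|,
 # and the signal/background counts are then re-filled as weights into the
 # probability, qp and r histograms defined above.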
826  for i in range(1, binCounter):
827  # doing the transformation to probabilities
828  signal[i] = hist_signal.GetBinContent(i)
829  back[i] = hist_background.GetBinContent(i)
830  weight[i] = signal[i] + back[i]
831 
832  # avoid dividing by zero
833  if signal[i] + back[i] == 0:
834  purityB0[i] = 0
835  dilutionB02[i] = 0
836  purityB0bar[i] = 0
837  dilutionB0bar2[i] = 0
838  else:
839 
840  purityB0[i] = signal[i] / (signal[i] + back[i])
841  dilutionB02[i] = -1 + 2 * signal[i] / (signal[i] + back[i])
842 
843  purityB0bar[i] = back[i] / (signal[i] + back[i])
844  dilutionB0bar2[i] = -1 + 2 * back[i] / (signal[i] + back[i])
845 
846  # filling histogram with probability from 0 to 1
847  hist_probB0.Fill(purityB0[i], signal[i])
848  hist_probB0bar.Fill(purityB0bar[i], back[i])
849 
850  # filling histogram with qr from -1 to 1
851  hist_qrB0.Fill(dilutionB02[i], signal[i])
852  hist_qrB0bar.Fill(dilutionB0bar2[i], back[i])
853 
854  # filling histogram with abs(qr), i.e. this histogram contains the r-values (not qp)
855  histo_entries_per_binB0.Fill(abs(dilutionB02[i]), signal[i])
856  histo_entries_per_binB0bar.Fill(abs(dilutionB0bar2[i]), back[i])
857  # filling histogram with abs(qr), weighted by abs(qr)*(number of events) - needed for the average r calculation
858  hist_avr_rB0.Fill(abs(dilutionB02[i]), abs(dilutionB02[i]) * signal[i])
859  hist_avr_rB0bar.Fill(abs(dilutionB0bar2[i]), abs(dilutionB0bar2[i]) * back[i])
860  # filling histogram with abs(qr), weighted by abs(qr)**2*(number of events) - needed for the r std-dev calculation
861  hist_ms_rB0.Fill(abs(dilutionB02[i]), abs(dilutionB02[i] * dilutionB02[i]) * signal[i])
862  hist_ms_rB0bar.Fill(abs(dilutionB0bar2[i]), abs(dilutionB0bar2[i] * dilutionB0bar2[i]) * back[i])
863 
864  # hist_avr_rB0 and hist_avr_rB0bar now contain the average r-values
865  hist_avr_rB0.Divide(histo_entries_per_binB0)
866  hist_avr_rB0bar.Divide(histo_entries_per_binB0bar)
867 
868  hist_ms_rB0.Divide(histo_entries_per_binB0)
869  hist_ms_rB0bar.Divide(histo_entries_per_binB0bar)
870  # now calculating the efficiency
871 
872  # calculating number of events
873  total_entriesB0 = total_notTagged / 2
874  total_entriesB0bar = total_notTagged / 2
875  for i in range(1, r_size):
876  total_entriesB0 = total_entriesB0 + histo_entries_per_binB0.GetBinContent(i)
877  total_entriesB0bar = total_entriesB0bar + histo_entries_per_binB0bar.GetBinContent(i)
878  # initializing some arrays
879  tot_eff_effB0 = 0
880  tot_eff_effB0bar = 0
881  uncertainty_eff_effB0 = 0
882  uncertainty_eff_effB0bar = 0
883  uncertainty_eff_effAverage = 0
884  diff_eff_Uncertainty = 0
885  event_fractionB0 = array('f', [0] * r_size)
886  event_fractionB0bar = array('f', [0] * r_size)
887  rvalueB0 = array('f', [0] * r_size)
888  rvalueB0bar = array('f', [0] * r_size)
889  rvalueStdB0 = array('f', [0] * r_size)
890  rvalueStdB0bar = array('f', [0] * r_size)
891  # wvalue = array('f', [0] * r_size)
892  entriesBoth = array('f', [0] * r_size)
893  entriesB0 = array('f', [0] * r_size)
894  entriesB0bar = array('f', [0] * r_size)
895  iEffEfficiencyB0Uncertainty = array('f', [0] * r_size)
896  iEffEfficiencyB0barUncertainty = array('f', [0] * r_size)
897  iDeltaEffEfficiencyUncertainty = array('f', [0] * r_size)
898 
899  for i in range(1, r_size):
900 
901  entriesB0[i] = histo_entries_per_binB0.GetBinContent(i)
902  entriesB0bar[i] = histo_entries_per_binB0bar.GetBinContent(i)
903  entriesBoth[i] = entriesB0bar[i] + entriesB0[i]
904  event_fractionB0[i] = entriesB0[i] / total_entriesB0
905  event_fractionB0bar[i] = entriesB0bar[i] / total_entriesB0bar
906  # print '* Bin ' + str(i) + ' r-value: ' + str(rvalueB0[i]), 'entriesB0: ' +
907  # str(event_fractionB0[i] * 100) + ' % (' + str(entriesB0[i]) + '/' +
908  # str(total_entriesB0) + ')'
909 
910  rvalueB0[i] = hist_avr_rB0.GetBinContent(i)
911  rvalueB0bar[i] = hist_avr_rB0bar.GetBinContent(i)
912 
913  rvalueStdB0[i] = 0
914  rvalueStdB0bar[i] = 0
915 
916  if entriesB0[i] > 1:
917  rvalueStdB0[i] = math.sqrt(abs(hist_ms_rB0.GetBinContent(
918  i) - (hist_avr_rB0.GetBinContent(i))**2)) / math.sqrt(entriesB0[i] - 1)
919 
920  if entriesB0bar[i] > 1:
921  rvalueStdB0bar[i] = math.sqrt(abs(hist_ms_rB0bar.GetBinContent(
922  i) - (hist_avr_rB0bar.GetBinContent(i))**2)) / math.sqrt(entriesB0bar[i] - 1)
923  # wvalue[i] = (1 - rvalueB0[i]) / 2
924 
925  tot_eff_effB0 = tot_eff_effB0 + event_fractionB0[i] * rvalueB0[i] \
926  * rvalueB0[i]
927  tot_eff_effB0bar = tot_eff_effB0bar + event_fractionB0bar[i] * rvalueB0bar[i] \
928  * rvalueB0bar[i]
929 
930  iEffEfficiencyB0Uncertainty[i] = rvalueB0[i] * \
931  math.sqrt((2 * total_entriesB0 * entriesB0[i] * rvalueStdB0[i])**2 +
932  rvalueB0[i]**2 * entriesB0[i] *
933  (total_entriesB0 * (total_entriesB0 - entriesB0[i]) +
934  entriesB0[i] * total_notTagged)) / (total_entriesB0**2)
935  iEffEfficiencyB0barUncertainty[i] = rvalueB0bar[i] * \
936  math.sqrt((2 * total_entriesB0bar * entriesB0bar[i] * rvalueStdB0bar[i])**2 +
937  rvalueB0bar[i]**2 * entriesB0bar[i] *
938  (total_entriesB0bar * (total_entriesB0bar - entriesB0bar[i]) +
939  entriesB0bar[i] * total_notTagged)) / (total_entriesB0bar**2)
940 
941  iDeltaEffEfficiencyUncertainty[i] = math.sqrt(iEffEfficiencyB0Uncertainty[i]**2 + iEffEfficiencyB0barUncertainty[i]**2)
942 
943  diff_eff_Uncertainty = diff_eff_Uncertainty + iDeltaEffEfficiencyUncertainty[i]**2
944 
945  uncertainty_eff_effB0 = uncertainty_eff_effB0 + iEffEfficiencyB0Uncertainty[i]**2
946  uncertainty_eff_effB0bar = uncertainty_eff_effB0bar + iEffEfficiencyB0barUncertainty[i]**2
947 
948  effDiff = tot_eff_effB0 - tot_eff_effB0bar
949  effAverage = (tot_eff_effB0 + tot_eff_effB0bar) / 2
950 
951  uncertainty_eff_effB0 = math.sqrt(uncertainty_eff_effB0)
952  uncertainty_eff_effB0bar = math.sqrt(uncertainty_eff_effB0bar)
953  diff_eff_Uncertainty = math.sqrt(diff_eff_Uncertainty)
954  uncertainty_eff_effAverage = diff_eff_Uncertainty / 2
955  print(
956  '{:<25}'.format("* " + category) + ' B0-Eff=' +
957  '{: 8.2f}'.format(tot_eff_effB0 * 100) + " +-" + '{: 4.2f}'.format(uncertainty_eff_effB0 * 100) +
958  ' %' +
959  ' B0bar-Eff=' +
960  '{: 8.2f}'.format(tot_eff_effB0bar * 100) + " +-" + '{: 4.2f}'.format(uncertainty_eff_effB0bar * 100) +
961  ' %' +
962  ' EffAverage=' +
963  '{: 8.2f}'.format(effAverage * 100) + " +- " + '{:4.2f}'.format(uncertainty_eff_effAverage * 100) + ' %' +
964  ' EffDiff=' +
965  '{: 8.2f}'.format(effDiff * 100) + " +- " + '{:4.2f}'.format(diff_eff_Uncertainty * 100) + ' % *')
966 
967  # hist_signal.Write('', ROOT.TObject.kOverwrite)
968  # hist_background.Write('', ROOT.TObject.kOverwrite)
969  # hist_probB0.Write('', ROOT.TObject.kOverwrite)
970  # hist_probB0bar.Write('', ROOT.TObject.kOverwrite)
971  # hist_qpB0.Write('', ROOT.TObject.kOverwrite)
972  # hist_qpB0bar.Write('', ROOT.TObject.kOverwrite)
973  # hist_absqpB0.Write('', ROOT.TObject.kOverwrite)
974  # hist_absqpB0bar.Write('', ROOT.TObject.kOverwrite)
975  # hist_avr_rB0.Write('', ROOT.TObject.kOverwrite)
976  # hist_avr_rB0bar.Write('', ROOT.TObject.kOverwrite)
977  # hist_all.Write('', ROOT.TObject.kOverwrite)
978  # hist_calib_B0.Write('', ROOT.TObject.kOverwrite)
979  categoriesPerformance.append((category, effAverage, uncertainty_eff_effAverage, effDiff, diff_eff_Uncertainty))
980  with Quiet(ROOT.kError):
981  Canvas.Clear()
982 # if average_eff != 0:
983  # print '* -------------------------------------------------------------------------'
984  # print '* ' + '{: > 8.2f}'.format(average_eff * 100) + ' %' \
985  # + '{:>85}'.format('TOTAL' + ' *')
986 
987 print('* ' +
988  ' *')
989 print('**************************************************************************************************************************' +
990  '************************')
991 print('')
992 print('Table For B2TIP')
993 print('')
994 print(r'\begin{tabular}{ l r r }')
995 print(r'\hline')
996 print(r'Categories & $\varepsilon_\text{eff} \pm \delta\varepsilon_\text{eff} $& ' +
997  r'$\Delta\varepsilon_\text{eff} \pm \delta\Delta\varepsilon_\text{eff}$\\ \hline\hline')
998 for (category, effAverage, uncertainty_eff_effAverage, effDiff, diff_eff_Uncertainty) in categoriesPerformance:
999  print(
1000  '{:<23}'.format(category) +
1001  ' & $' +
1002  '{: 6.2f}'.format(effAverage * 100) + r" \pm " + '{:4.2f}'.format(uncertainty_eff_effAverage * 100) +
1003  ' $ & $' +
1004  '{: 6.2f}'.format(effDiff * 100) + r" \pm " + '{:4.2f}'.format(diff_eff_Uncertainty * 100) +
1005  r'\ \enskip $ \\')
1006 print(r'\hline')
1007 print(r'\end{tabular}')
1008 B2INFO('qp Output Histograms in pdf format saved at: ' + workingDirectory)
1009 with open("categoriesPerformance.pkl", "wb") as f:
1010  pickle.dump(categoriesPerformance, f)
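# The pickle written above stores one tuple per category in the order appended in
# this script: (category, effAverage, uncertainty_eff_effAverage, effDiff,
# diff_eff_Uncertainty). A minimal sketch for reading it back elsewhere:
#
#   import pickle
#   with open("categoriesPerformance.pkl", "rb") as f:
#       for category, effAvg, dEffAvg, effDiff, dEffDiff in pickle.load(f):
#           print(f"{category}: effective efficiency = {effAvg:.4f} +- {dEffAvg:.4f}")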