Belle II Software release-05-01-25
flavorTaggerEfficiency.py
1 #!/usr/bin/env python3
2 # -*- coding: utf-8 -*-
3 
4 # ************** Flavor Tagging **************
5 # * Authors: Moritz Gelb & Fernando Abudinen *
6 # * *
7 # * This script calculates the parameters charac- *
8 # * terizing the performance of the flavor tag- *
9 # * ger. It also produces plots of the qr dis- *
10 # * tribution of the combiners and of the dis- *
11 # * tributions of the combiner input variables. *
12 # * *
13 # ***********************************************
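#
# Example invocation (assumed; requires the basf2 environment to be set up, and the wildcard
# pattern should be quoted so the shell does not expand it; the file pattern below is a placeholder):
#   python3 flavorTaggerEfficiency.py 'yourTaggedFiles*.root' B0tree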
14 
15 import ROOT
16 from basf2 import B2INFO
17 import flavorTagger as ft
18 from defaultEvaluationParameters import categories, Quiet, r_subsample, r_size
19 from array import array
20 import pickle
21 import math
22 import glob
23 import sys
24 
25 ROOT.gROOT.SetBatch(True)
26 
27 if len(sys.argv) != 3:
28  sys.exit("Must provide 2 arguments: [input_sim_file] or ['input_sim_file*'] with wildcards and [treeName]"
29  )
30 workingFile = sys.argv[1]
31 workingFiles = glob.glob(str(workingFile))
32 treeName = str(sys.argv[2])
33 
34 if len(workingFiles) < 1:
35  sys.exit("No file(s) matching " + str(workingFile) + " found.")
36 
37 
38 workingDirectory = '.'
39 
40 #
41 # *****************************************
42 # DETERMINATION OF TOTAL EFFECTIVE EFFICIENCY
43 # *****************************************
44 #
45 
46 # working directory
47 # needs the B0bar_final.root-file
48 # treeName = 'B0tree'
49 
50 
51 methods = []
52 
53 tree = ROOT.TChain(treeName)
54 
55 mcstatus = array('d', [-511.5, 0.0, 511.5])
56 ROOT.TH1.SetDefaultSumw2()
57 
58 for iFile in workingFiles:
59  tree.AddFile(iFile)
60 
61 totalBranches = []
62 for branch in tree.GetListOfBranches():
63  totalBranches.append(branch.GetName())
64 
65 if 'FBDT_qrCombined' in totalBranches:
66  methods.append("FBDT")
67 
68 if 'FANN_qrCombined' in totalBranches:
69  methods.append("FANN")
70 
71 if 'DNN_qrCombined' in totalBranches:
72  methods.append("DNN")
73 
74 usedCategories = []
75 for cat in categories:
76  catBranch = 'qp' + cat
77  if catBranch in totalBranches:
78  usedCategories.append(cat)
79 
80 if len(usedCategories) > 1:
81  ft.WhichCategories(usedCategories)
82 
83 YmaxForQrPlot = 0
84 
85 total_notTagged = 0
86 
87 for method in methods:
88  # Get errors with GetBinError(), set errors with SetBinError()
89  # histogram containing the average r in each r bin -> calculation see below
90  histo_avr_r = ROOT.TH1F('Average_r', 'Average r in each of the bins (B0 and B0bar)', int(r_size - 2),
91  r_subsample)
92  histo_avr_rB0 = ROOT.TH1F('Average_rB0', 'Average r in each of the bins (B0)', int(r_size - 2),
93  r_subsample)
94  histo_avr_rB0bar = ROOT.TH1F('Average_rB0bar', 'Average r in each of the bins (B0bar)', int(r_size - 2),
95  r_subsample)
96 
97  # histograms containing the number of wrong tags, obtained by counting wrongly tagged events
98  histo_mc_NwB0 = ROOT.TH1F('mc_NwB0', 'Average r in each of the bins (B0)', int(r_size - 2),
99  r_subsample)
100  histo_mc_NwB0bar = ROOT.TH1F('mc_NwB0bar', 'Average r in each of the bins (B0bar)', int(r_size - 2),
101  r_subsample)
102 
103  # histogram containing the mean squared r in each r bin -> calculation see below
104  histo_ms_r = ROOT.TH1F('MS_r', 'Mean squared average of r in each of the bins (B0 and B0bar)', int(r_size - 2),
105  r_subsample)
106  histo_ms_rB0 = ROOT.TH1F('MS_rB0', 'Mean squared average of r in each of the bins (B0)', int(r_size - 2),
107  r_subsample)
108  histo_ms_rB0bar = ROOT.TH1F('MS_rB0bar', 'Mean squared average of r in each of the bins (B0bar)', int(r_size - 2),
109  r_subsample)
110 
111  # histogram with the number of entries in each bin
112  histo_entries_per_bin = ROOT.TH1F(
113  'entries_per_bin',
114  'Events binned in r_subsample according to their r-value for B0 and B0bar prob',
115  int(r_size - 2),
116  r_subsample)
117  histo_entries_per_binB0 = ROOT.TH1F('entries_per_binB0', 'Events binned in r_subsample according '
118  'to their r-value for B0 prob', int(r_size - 2), r_subsample)
119  histo_entries_per_binB0bar = ROOT.TH1F('entries_per_binB0bar',
120  'Events binned in r_subsample according to their r-value '
121  'for B0bar prob', int(r_size - 2), r_subsample)
122  # histogram of the network output (neither qr nor r) for true B0 (signal) - not strictly necessary
123  histo_Cnet_output_B0 = ROOT.TH1F('Comb_Net_Output_B0', 'Combiner network output [not equal to r] '
124  'for true B0 (binning 100)', 100, 0.0, 1.0)
125  # histogram of the network output (neither qr nor r) for true B0bar (background) - not strictly necessary
126  histo_Cnet_output_B0bar = ROOT.TH1F('Comb_Net_Output_B0bar', 'Combiner network output [not equal to r] '
127  'for true B0bar (binning 100)', 100, 0.0, 1.0)
128  # histogram containing the belle paper plot (qr-tagger output for true B0)
129  histo_belleplotB0 = ROOT.TH1F('BellePlot_B0',
130  'BellePlot for true B0 (binning 50)', 50,
131  -1.0, 1.0)
132  # histogram containing the belle paper plot (qr-tagger output for true B0bar)
133  histo_belleplotB0bar = ROOT.TH1F('BellePlot_B0Bar',
134  'BellePlot for true B0Bar (binning 50)',
135  50, -1.0, 1.0)
136 
137  histo_notTaggedEvents = ROOT.TH1F('notTaggedEvents',
138  'Histogram for not tagged events',
139  1, -3.0, -1.0)
140 
141  # calibration plot for B0: if we get a linear line our MC is fine, then the assumption r ~ 1 - 2w is reasonable
142  # expectation for the B0 calibration plot: at qr = 0 half B0 and half B0bar, at qr = 1 only B0, and at qr = -1
143  # no B0; the inverse holds for the B0bar calibration plot
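# In terms of event counts per qr bin, the calibration curve is N_B0(qr) / (N_B0(qr) + N_B0bar(qr));
# if the output is well calibrated (r = |qr| ~ 1 - 2w), this is approximately (1 + qr)/2, i.e. a straight
# line with slope ~0.5 and offset ~0.5, which is what the pol1 fit ('diag') below checks.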
144  histo_calib_B0 = ROOT.TH1F('Calibration_B0', 'CalibrationPlot for true B0', 100, -1.0, 1.0)
145  # calibration plot for B0bar calibration plot
146  histo_calib_B0bar = ROOT.TH1F('Calibration_B0Bar',
147  'CalibrationPlot for true B0Bar', 100, -1.0,
148  1.0)
149  # belle plot with true B0 and B0bars
150  hallo12 = ROOT.TH1F('BellePlot_NoCut', 'BellePlot_NoCut (binning 100)',
151  100, -1.0, 1.0)
152 
153 
154  diag = ROOT.TF1('diag', 'pol1', -1, 1)
155 
156 
157  # histograms for the simplified (wrong) efficiency calculation
158  histo_m0 = ROOT.TH1F('BellePlot_m0',
159  'BellePlot_m for true B0 (binning 50)', 50, -1.0, 1.0)
160  histo_m1 = ROOT.TH1F('BellePlot_m1',
161  'BellePlot_m for true B0 (binning 50)', 50, -1.0, 1.0)
162  histo_m2 = ROOT.TH1F('BellePlot_m2',
163  'BellePlot_m for true B0Bar (binning 50)', 50, -1.0,
164  1.0)
165 
166  # filling the histograms
167 
168  tree.Draw(method + '_qrCombined>>BellePlot_B0', 'qrMC == 1')
169  tree.Draw(method + '_qrCombined>>BellePlot_B0Bar', 'qrMC == -1')
170  tree.Draw(method + '_qrCombined>>BellePlot_NoCut', 'abs(qrMC) == 1')
171 
172  tree.Draw(method + '_qrCombined>>Calibration_B0', 'qrMC == 1')
173  tree.Draw(method + '_qrCombined>>Calibration_B0Bar', 'qrMC == -1')
174 
175  tree.Draw(method + '_qrCombined>>notTaggedEvents',
176  'abs(qrMC) == 0 && isSignal == 1 && ' +
177  method + '_qrCombined < -1')
178 
179  # filling histograms for the simplified (wrong) efficiency calculation
180  tree.Draw(method + '_qrCombined>>BellePlot_m0',
181  'qrMC == 1 && ' + method + '_qrCombined>0')
182  tree.Draw(method + '_qrCombined>>BellePlot_m1',
183  'qrMC == 1 && ' + method + '_qrCombined<0')
184  tree.Draw(method + '_qrCombined>>BellePlot_m2',
185  'qrMC == -1 && ' + method + '_qrCombined>0 ')
186 
187  # filling with abs(qr) into the r bins with the corresponding weight
188  # separate calculation for B0 and B0bar
189 
190  tree.Project('Average_r', 'abs(' + method + '_qrCombined)', 'abs(' + method + '_qrCombined)*(abs(qrMC) == 1)')
191  tree.Project('Average_rB0', 'abs(' + method + '_qrCombined)', 'abs(' + method + '_qrCombined)*(qrMC==1)')
192  tree.Project('Average_rB0bar', 'abs(' + method + '_qrCombined)', 'abs(' + method + '_qrCombined)*(qrMC==-1)')
193 
194  tree.Project('MS_r', 'abs(' + method + '_qrCombined)', '(' + method +
195  '_qrCombined*' + method + '_qrCombined)*(abs(qrMC) == 1)')
196  tree.Project('MS_rB0', 'abs(' + method + '_qrCombined)',
197  '(' + method + '_qrCombined*' + method + '_qrCombined)*(qrMC==1)')
198  tree.Project('MS_rB0bar', 'abs(' + method + '_qrCombined)',
199  '(' + method + '_qrCombined*' + method + '_qrCombined)*(qrMC==-1)')
200 
201  # filling with abs(qr) into the r bins
202  tree.Project('entries_per_bin', 'abs(' + method + '_qrCombined)', 'abs(qrMC) == 1')
203  tree.Project('entries_per_binB0', 'abs(' + method + '_qrCombined)', 'qrMC == 1')
204  tree.Project('entries_per_binB0bar', 'abs(' + method + '_qrCombined)', 'qrMC == -1')
205 
206  # filling histograms with number of wrong tags per r-bin
207  tree.Project('mc_NwB0', 'abs(' + method + '_qrCombined)', ' ' + method + '_qrCombined*qrMC < 0 && qrMC == 1')
208  tree.Project('mc_NwB0bar', 'abs(' + method + '_qrCombined)', ' ' + method + '_qrCombined*qrMC < 0 && qrMC == -1')
209 
210  # producing the average r histograms
211  histo_avr_r.Divide(histo_entries_per_bin)
212  histo_avr_rB0.Divide(histo_entries_per_binB0)
213  histo_avr_rB0bar.Divide(histo_entries_per_binB0bar)
214 
215  histo_ms_r.Divide(histo_entries_per_bin)
216  histo_ms_rB0.Divide(histo_entries_per_binB0)
217  histo_ms_rB0bar.Divide(histo_entries_per_binB0bar)
218 
219  # producing the calibration plots
220  # Errors ok
221  histo_calib_B0.Divide(hallo12)
222  histo_calib_B0bar.Divide(hallo12)
223 
224  # Fit for calibration plot
225  print(' ')
226  print('****************************** CALIBRATION CHECK FOR COMBINER USING ' +
227  method + ' ***************************************')
228  print(' ')
229  print('Fit a polynomial of first order to the calibration plot. Expected value ~0.5')
230  print(' ')
231  histo_calib_B0.Fit(diag, 'TEST')
232  print(' ')
233  print('****************************** MEASURED EFFECTIVE EFFICIENCY FOR COMBINER USING ' +
234  method + ' ***********************************')
235  print('* ' +
236  ' *')
237  # get total number of entries
238  total_tagged = histo_entries_per_bin.GetEntries()
239  total_tagged_B0 = histo_entries_per_binB0.GetEntries()
240  total_tagged_B0bar = histo_entries_per_binB0bar.GetEntries()
241  total_notTagged = histo_notTaggedEvents.GetEntries()
242  total_entries = (total_tagged + total_notTagged)
243  # To a good approximation we assume that half of the not tagged B mesons were B0 (B0bar)
244  total_entriesB0 = (total_tagged_B0 + total_notTagged / 2)
245  total_entriesB0bar = (total_tagged_B0bar + total_notTagged / 2)
246 
247  tagging_eff = total_tagged / (total_tagged + total_notTagged)
248  DeltaTagging_eff = math.sqrt(total_tagged * total_notTagged**2 + total_notTagged * total_tagged**2) / (total_entries**2)
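# The expression above is the binomial uncertainty on the tagging efficiency written out:
# sqrt(Nt*Nnt**2 + Nnt*Nt**2)/Ntot**2 = sqrt(Nt*Nnt/Ntot**3) = sqrt(eff*(1 - eff)/Ntot),
# with Nt = total_tagged, Nnt = total_notTagged and Ntot = total_entries.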
249  tot_eff_effB0 = 0
250  tot_eff_effB0bar = 0
251  average_eff_eff = 0
252  uncertainty_eff_effB0 = 0
253  uncertainty_eff_effB0bar = 0
254  uncertainty_eff_effAverage = 0
255  diff_eff_Uncertainty = 0
256  event_fractionB0 = array('f', [0] * r_size)
257  event_fractionB0bar = array('f', [0] * r_size)
258  event_fractionTotal = array('f', [0] * r_size)
259  event_fractionTotalUncertainty = array('f', [0] * r_size)
260  eventsInBin_B0 = array('f', [0] * r_size)
261  eventsInBin_B0bar = array('f', [0] * r_size)
262  eventsInBin_Total = array('f', [0] * r_size)
263  event_fractionDiff = array('f', [0] * r_size)
264  event_fractionDiffUncertainty = array('f', [0] * r_size)
265  rvalueB0 = array('f', [0] * r_size)
266  rvalueB0bar = array('f', [0] * r_size)
267  rvalueB0Average = array('f', [0] * r_size)
268  rvalueStdB0 = array('f', [0] * r_size)
269  rvalueStdB0bar = array('f', [0] * r_size)
270  rvalueStdB0Average = array('f', [0] * r_size)
271  wvalue = array('f', [0] * r_size)
272  wvalueUncertainty = array('f', [0] * r_size)
273  wvalueB0 = array('f', [0] * r_size)
274  wvalueB0bar = array('f', [0] * r_size)
275  wvalueB0Uncertainty = array('f', [0] * r_size)
276  wvalueB0barUncertainty = array('f', [0] * r_size)
277  wvalueDiff = array('f', [0] * r_size)
278  wvalueDiffUncertainty = array('f', [0] * r_size)
279  entries = array('f', [0] * r_size)
280  entriesB0 = array('f', [0] * r_size)
281  entriesB0bar = array('f', [0] * r_size)
282  iEffEfficiency = array('f', [0] * r_size)
283  iEffEfficiencyUncertainty = array('f', [0] * r_size)
284  iEffEfficiencyB0Uncertainty = array('f', [0] * r_size)
285  iEffEfficiencyB0barUncertainty = array('f', [0] * r_size)
286  iEffEfficiencyB0UncertaintyFromOutput = array('f', [0] * r_size)
287  iEffEfficiencyB0barUncertaintyFromOutput = array('f', [0] * r_size)
288 
289  iDeltaEffEfficiency = array('f', [0] * r_size)
290  iDeltaEffEfficiencyUncertainty = array('f', [0] * r_size)
291  muParam = array('f', [0] * r_size)
292  muParamUncertainty = array('f', [0] * r_size)
293  # intervallEff = array('f', [0] * r_size)
294 
295  print('* --> DETERMINATION BASED ON MONTE CARLO ' +
296  ' *')
297  print('* ' +
298  ' *')
299  print('* Note: mu = Delta_Effcy/(2*Efficiency). Needed for CP analysis ' +
300  'together with w and Delta_w *')
301  print('* ' +
302  ' *')
303  print('* ------------------------------------------------------------------' +
304  '-------------------------------------------------- *')
305  print('* r-interval <r> Efficiency Delta_Effcy ' +
306  ' mu w Delta_w *')
307  print('* ------------------------------------------------------------------' +
308  '-------------------------------------------------- *')
309  performance = []
310  for i in range(1, r_size):
311  # get the average r-value
312  entries[i] = histo_entries_per_bin.GetBinContent(i)
313  entriesB0[i] = histo_entries_per_binB0.GetBinContent(i)
314  entriesB0bar[i] = histo_entries_per_binB0bar.GetBinContent(i)
315  # fraction of events/all events
316 
317  event_fractionB0[i] = entriesB0[i] / total_entriesB0
318  event_fractionB0bar[i] = entriesB0bar[i] / total_entriesB0bar
319 
320  # event_fractionTotal[i] = (entriesB0[i] + entriesB0bar[i]) / total_entries
321  # event_fractionDiff[i] = (entriesB0[i] - entriesB0bar[i]) / total_entries
322 
323  event_fractionTotal[i] = (event_fractionB0[i] + event_fractionB0bar[i]) / 2
324  event_fractionDiff[i] = event_fractionB0[i] - event_fractionB0bar[i]
325 
326  event_fractionDiffUncertainty[i] = math.sqrt(entriesB0[i] *
327  (total_entriesB0 -
328  entriesB0[i]) /
329  total_entriesB0**3 +
330  entriesB0bar[i] *
331  (total_entriesB0bar -
332  entriesB0bar[i]) /
333  total_entriesB0bar**3)
334 
335  event_fractionTotalUncertainty[i] = event_fractionDiffUncertainty[i] / 2
336 
337  rvalueB0[i] = histo_avr_rB0.GetBinContent(i)
338  rvalueB0bar[i] = histo_avr_rB0bar.GetBinContent(i)
339  rvalueB0Average[i] = histo_avr_r.GetBinContent(i) # (rvalueB0[i] + rvalueB0bar[i]) / 2
340  rvalueStdB0[i] = math.sqrt(histo_ms_rB0.GetBinContent(
341  i) - (histo_avr_rB0.GetBinContent(i))**2) / math.sqrt(entriesB0[i] - 1)
342  rvalueStdB0bar[i] = math.sqrt(histo_ms_rB0bar.GetBinContent(
343  i) - (histo_avr_rB0bar.GetBinContent(i))**2) / math.sqrt(entriesB0bar[i] - 1)
344  rvalueStdB0Average[i] = math.sqrt(rvalueStdB0[i]**2 + rvalueStdB0bar[i]**2) / 2
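# rvalueStd* is the standard error of the mean r in the bin, sqrt(<r**2> - <r>**2) / sqrt(N - 1),
# built from the mean-squared and average-r histograms filled above.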
345  # math.sqrt(histo_ms_r.GetBinContent(i) - (histo_avr_r.GetBinContent(i))**2)
346  # calculate the wrong-tag fraction (only valid if the MC truth information is reliable)
347 
348  wvalueB0[i] = histo_mc_NwB0.GetBinContent(i) / entriesB0[i] # (1 - rvalueB0[i]) / 2
349  wvalueB0bar[i] = histo_mc_NwB0bar.GetBinContent(i) / entriesB0bar[i] # (1 - rvalueB0bar[i]) / 2
350  wvalueDiff[i] = wvalueB0[i] - wvalueB0bar[i]
351  wvalueB0Uncertainty[i] = math.sqrt(histo_mc_NwB0.GetBinContent(
352  i) * (entriesB0[i] - histo_mc_NwB0.GetBinContent(i)) / (entriesB0[i]**3))
353  wvalueB0barUncertainty[i] = math.sqrt(histo_mc_NwB0bar.GetBinContent(
354  i) * (entriesB0bar[i] - histo_mc_NwB0bar.GetBinContent(i)) / (entriesB0bar[i]**3))
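# Binomial uncertainty on the wrong-tag fraction per bin: sqrt(Nw*(N - Nw)/N**3),
# with Nw the number of wrongly tagged and N the number of tagged events in the bin.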
355  # math.sqrt((rvalueStdB0[i] / 2)**2 + (rvalueStdB0bar[i] / 2)**2)
356  wvalueDiffUncertainty[i] = math.sqrt(wvalueB0Uncertainty[i]**2 + wvalueB0barUncertainty[i]**2)
357  wvalue[i] = (wvalueB0[i] + wvalueB0bar[i]) / 2
358  wvalueUncertainty[i] = wvalueDiffUncertainty[i] / 2
359 
360  # Avr Efficiency
361 
362  # iEffEfficiency[i] = (event_fractionB0[i] * rvalueB0[i] * rvalueB0[i] +
363  # event_fractionB0bar[i] * rvalueB0bar[i] * rvalueB0bar[i]) / 2
364 
365  iEffEfficiency[i] = event_fractionTotal[i] * (1 - 2 * wvalue[i])**2
366 
367  iEffEfficiencyUncertainty[i] = (1 - 2 * wvalue[i]) * \
368  math.sqrt((2 * event_fractionTotal[i] * 2 * wvalueUncertainty[i])**2 +
369  (1 - 2 * wvalue[i])**2 * event_fractionTotalUncertainty[i]**2)
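# Per-bin effective efficiency eps_eff,i = eps_i * (1 - 2*w_i)**2 (cf. the LaTeX table printed below);
# its uncertainty follows from standard error propagation:
# delta = sqrt( ((1 - 2*w_i)**2 * delta_eps_i)**2 + (4 * eps_i * (1 - 2*w_i) * delta_w_i)**2 ).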
370 
371  # iEffEfficiencyUncertainty[i] = rvalueB0Average[i] * \
372  # math.sqrt((2 * total_entries * entries[i] * rvalueStdB0Average[i])**2 +
373  # rvalueB0Average[i]**2 * entries[i] *
374  # (total_entries * (total_entries - entries[i]) +
375  # entries[i] * total_notTagged)) / (total_entries**2)
376 
377  # iEffEfficiencyB0UncertaintyFromOutput[i] = rvalueB0[i] * \
378  # math.sqrt((2 * total_entriesB0 * entriesB0[i] * rvalueStdB0[i])**2 +
379  # rvalueB0[i]**2 * entriesB0[i] *
380  # total_entriesB0 * (total_entriesB0 - entriesB0[i])) / (total_entriesB0**2)
381  # iEffEfficiencyB0barUncertaintyFromOutput[i] = rvalueB0bar[i] * \
382  # math.sqrt((2 * total_entriesB0bar * entriesB0bar[i] * rvalueStdB0bar[i])**2 +
383  # rvalueB0bar[i]**2 * entriesB0bar[i] *
384  # total_entriesB0bar * (total_entriesB0bar - entriesB0bar[i])) / (total_entriesB0bar**2)
385 
386  # iEffEfficiencyUncertainty[i] =
387  # math.sqrt(iEffEfficiencyB0UncertaintyFromOutput[i]**2 +
388  # iEffEfficiencyB0barUncertaintyFromOutput[i]**2) / 2
389 
390  # iEffEfficiency[i] = (event_fractionB0[i] * (1-2*wvalueB0[i])**2 + event_fractionB0bar[i] * (1-2*wvalueB0bar[i])**2)/2
391 
392  average_eff_eff += iEffEfficiency[i]
393  # average_eff_eff += (event_fractionB0[i] * (1-2*wvalueB0[i])**2 + event_fractionB0bar[i] * (1-2*wvalueB0bar[i])**2)/2
394 
395  # Delta Eff
396 
397  iDeltaEffEfficiency[i] = event_fractionB0[i] * (1 - 2 * wvalueB0[i])**2 - \
398  event_fractionB0bar[i] * (1 - 2 * wvalueB0bar[i])**2
399 
400  iEffEfficiencyB0Uncertainty[i] = (1 - 2 * wvalueB0[i]) * \
401  math.sqrt((2 * total_entriesB0 * entriesB0[i] * 2 * wvalueB0Uncertainty[i])**2 +
402  (1 - 2 * wvalueB0[i])**2 * entriesB0[i] *
403  total_entriesB0 * (total_entriesB0 - entriesB0[i])) / (total_entriesB0**2)
404  iEffEfficiencyB0barUncertainty[i] = (1 - 2 * wvalueB0bar[i]) * \
405  math.sqrt((2 * total_entriesB0bar * entriesB0bar[i] * 2 * wvalueB0barUncertainty[i])**2 +
406  (1 - 2 * wvalueB0bar[i])**2 * entriesB0bar[i] *
407  total_entriesB0bar * (total_entriesB0bar - entriesB0bar[i])) / (total_entriesB0bar**2)
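# Same propagation as above, done per flavor, with the binomial uncertainty on the event fraction,
# sqrt(N_i*(Ntot - N_i)/Ntot**3), written out explicitly inside the square root.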
408 
409  iDeltaEffEfficiencyUncertainty[i] = math.sqrt(iEffEfficiencyB0Uncertainty[i]**2 + iEffEfficiencyB0barUncertainty[i]**2)
410  # iEffEfficiencyUncertainty[i] = iDeltaEffEfficiencyUncertainty[i]/2
411 
412  diff_eff_Uncertainty = diff_eff_Uncertainty + iDeltaEffEfficiencyUncertainty[i]**2
413 
414  # finally calculating the total effective efficiency
415  tot_eff_effB0 = tot_eff_effB0 + event_fractionB0[i] * (1 - 2 * wvalueB0[i])**2
416  tot_eff_effB0bar = tot_eff_effB0bar + event_fractionB0bar[i] * (1 - 2 * wvalueB0bar[i])**2
417  uncertainty_eff_effAverage = uncertainty_eff_effAverage + iEffEfficiencyUncertainty[i]**2
418  uncertainty_eff_effB0 = uncertainty_eff_effB0 + iEffEfficiencyB0Uncertainty[i]**2
419  uncertainty_eff_effB0bar = uncertainty_eff_effB0bar + iEffEfficiencyB0barUncertainty[i]**2
420  muParam[i] = event_fractionDiff[i] / (2 * event_fractionTotal[i])
421  muParamUncertainty[i] = event_fractionDiffUncertainty[i] / (2 * event_fractionTotal[i]) * math.sqrt(muParam[i]**2 + 1)
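# Per-bin tagging asymmetry mu_i = (eps_i^B0 - eps_i^B0bar) / (2*eps_i), cf. the header printout above;
# the sqrt(mu_i**2 + 1) factor in its uncertainty accounts (approximately) for the fact that the
# numerator and the denominator are built from the same event counts.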
422 
423  # intervallEff[i] = event_fractionTotal[i] * rvalueB0Average[i] * rvalueB0Average[i]
424  print('* ' + '{:.3f}'.format(r_subsample[i - 1]) + ' - ' + '{:.3f}'.format(r_subsample[i]) + ' ' +
425  '{:.3f}'.format(rvalueB0Average[i]) + ' +- ' + '{:.4f}'.format(rvalueStdB0Average[i]) + ' ' +
426  '{:.4f}'.format(event_fractionTotal[i]) + ' ' +
427  '{: .4f}'.format(event_fractionDiff[i]) + ' +- ' + '{:.4f}'.format(event_fractionDiffUncertainty[i]) + ' ' +
428  '{: .4f}'.format(muParam[i]) + ' +- ' + '{:.4f}'.format(muParamUncertainty[i]) + ' ' +
429  '{:.4f}'.format(wvalue[i]) + ' +- ' + '{:.4f}'.format(wvalueUncertainty[i]) + ' ' +
430  '{: .4f}'.format(wvalueDiff[i]) + ' +- ' + '{:.4f}'.format(wvalueDiffUncertainty[i]) + ' *')
431 
432  # average_eff_eff = (tot_eff_effB0 + tot_eff_effB0bar) / 2
433  uncertainty_eff_effAverage = math.sqrt(uncertainty_eff_effAverage)
434  uncertainty_eff_effB0 = math.sqrt(uncertainty_eff_effB0)
435  uncertainty_eff_effB0bar = math.sqrt(uncertainty_eff_effB0bar)
436  diff_eff = tot_eff_effB0 - tot_eff_effB0bar
437  diff_eff_Uncertainty = math.sqrt(diff_eff_Uncertainty)
438  print('* --------------------------------------------------------------------------------------------------' +
439  '------------------ *')
440  print('* *')
441  print('* __________________________________________________________________________________________ *')
442  print('* | | *')
443  print('* | TOTAL NUMBER OF TAGGED EVENTS = ' +
444  '{:<24}'.format("%.0f" % total_tagged) + '{:>36}'.format('| *'))
445  print('* | | *')
446  print(
447  '* | TOTAL AVERAGE EFFICIENCY (q=+-1)= ' +
448  '{:.2f}'.format(
449  tagging_eff *
450  100) +
451  " +- " +
452  '{:.2f}'.format(
453  DeltaTagging_eff *
454  100) +
455  ' % | *')
456  print('* | | *')
457  print(
458  '* | TOTAL AVERAGE EFFECTIVE EFFICIENCY (q=+-1)= ' +
459  '{:.6f}'.format(
460  average_eff_eff *
461  100) +
462  " +- " +
463  '{:.6f}'.format(
464  uncertainty_eff_effAverage *
465  100) +
466  ' % | *')
467  print('* | | *')
468  print(
469  '* | TOTAL AVERAGE EFFECTIVE EFFICIENCY ASYMMETRY (q=+-1)= ' +
470  '{:^9.6f}'.format(
471  diff_eff *
472  100) +
473  " +- " +
474  '{:.6f}'.format(
475  diff_eff_Uncertainty *
476  100) +
477  ' % | *')
478  print('* | | *')
479  print('* | B0-TAGGER TOTAL EFFECTIVE EFFICIENCIES: ' +
480  '{:.2f}'.format(tot_eff_effB0 * 100) + " +-" + '{: 4.2f}'.format(uncertainty_eff_effB0 * 100) +
481  ' % (q=+1) ' +
482  '{:.2f}'.format(tot_eff_effB0bar * 100) + " +-" + '{: 4.2f}'.format(uncertainty_eff_effB0bar * 100) +
483  ' % (q=-1) ' + ' | *')
484  print('* | | *')
485  print('* | FLAVOR PERCENTAGE (MC): ' +
486  '{:.2f}'.format(total_tagged_B0 / total_tagged * 100) + ' % (q=+1) ' +
487  '{:.2f}'.format(total_tagged_B0bar / total_tagged * 100) + ' % (q=-1) Diff=' +
488  '{:^5.2f}'.format((total_tagged_B0 - total_tagged_B0bar) / total_tagged * 100) + ' % | *')
489  print('* |__________________________________________________________________________________________| *')
490  print('* *')
491  print('****************************************************************************************************')
492  print('* *')
493 
494  # not that important
495  print('* --------------------------------- *')
496  print('* Efficiency Determination - easiest way *')
497  print('* --------------------------------- *')
498  total_tagged_B0 = histo_belleplotB0.GetEntries()
499  total_tagged_B0Bar = histo_belleplotB0bar.GetEntries()
500  total_tagged_wrong = histo_m1.GetEntries()
501  total_tagged_B0Bar_wrong = histo_m2.GetEntries()
502  total_tagged = total_tagged_B0 + total_tagged_B0Bar
503  total_tagged_wrong = total_tagged_wrong + total_tagged_B0Bar_wrong
504 
505  wrong_tag_fraction_B0 = total_tagged_wrong / total_tagged_B0
506  wrong_tag_fraction_B0Bar = total_tagged_B0Bar_wrong / total_tagged_B0Bar
507  wrong_tag_fraction = total_tagged_wrong / total_tagged
508  right_tag_fraction_B0 = 1 - 2 * wrong_tag_fraction_B0
509  right_tag_fraction_B0Bar = 1 - 2 * wrong_tag_fraction_B0Bar
510  right_tag_fraction = 1 - 2 * wrong_tag_fraction
511  wrong_eff_B0 = right_tag_fraction_B0 * right_tag_fraction_B0
512  wrong_eff_B0Bar = right_tag_fraction_B0Bar * right_tag_fraction_B0Bar
513  wrong_eff = right_tag_fraction * right_tag_fraction
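# Naive cross-check: all tagged events are treated with one global wrong-tag fraction w and the
# quoted number is (1 - 2*w)**2, ignoring the r binning; this is why it is labelled the "easiest" (wrong) way.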
514 
515  print('* wrong_tag_fraction for all: ' +
516  '{:.3f}'.format(wrong_tag_fraction * 100) +
517  ' % *')
518  print('* right_tag_fraction for all: ' +
519  '{:.3f}'.format(right_tag_fraction * 100) +
520  ' % *')
521  print('* wrongly calculated eff all: ' + '{:.3f}'.format(wrong_eff * 100) +
522  ' % *')
523  print('* *')
524  print('****************************************************************************************************')
525  print('')
526  print('Table For B2TIP')
527  print('')
528  # write out the histograms
529  # histo_avr_r.Write('', ROOT.TObject.kOverwrite)
530  # histo_entries_per_bin.Write('', ROOT.TObject.kOverwrite)
531 
532  # histo_Cnet_output_B0.Write('', ROOT.TObject.kOverwrite)
533  # histo_Cnet_output_B0bar.Write('', ROOT.TObject.kOverwrite)
534  # histo_belleplotB0.Write('', ROOT.TObject.kOverwrite)
535  # histo_belleplotB0bar.Write('', ROOT.TObject.kOverwrite)
536  # histo_calib_B0.Write('', ROOT.TObject.kOverwrite)
537  # histo_calib_B0bar.Write('', ROOT.TObject.kOverwrite)
538 
539  maxB0 = histo_belleplotB0.GetBinContent(histo_belleplotB0.GetMaximumBin())
540  maxB0bar = histo_belleplotB0bar.GetBinContent(histo_belleplotB0bar.GetMaximumBin())
541 
542  Ymax = max(maxB0, maxB0bar)
543  Ymax = Ymax + Ymax / 12
544 
545  if YmaxForQrPlot < Ymax:
546  YmaxForQrPlot = Ymax
547 
548  # produce a pdf
549  ROOT.gStyle.SetOptStat(0)
550  Canvas1 = ROOT.TCanvas('Bla' + method, 'Final Output', 1200, 800)
551  Canvas1.cd() # activate
552  histo_belleplotB0.SetFillColorAlpha(ROOT.kBlue, 0.2)
553  histo_belleplotB0.SetFillStyle(1001)
554  histo_belleplotB0.GetYaxis().SetLabelSize(0.03)
555  histo_belleplotB0.GetYaxis().SetLimits(0, YmaxForQrPlot)
556  histo_belleplotB0.GetYaxis().SetTitleOffset(1.2)
557  histo_belleplotB0.SetLineColor(ROOT.kBlue)
558  histo_belleplotB0bar.SetFillColorAlpha(ROOT.kRed, 1.0)
559  histo_belleplotB0bar.SetFillStyle(3005)
560  histo_belleplotB0bar.SetLineColor(ROOT.kRed)
561  # SetLabelSize etc SetTitle
562 
563  histo_belleplotB0.SetTitle('Final Flavor Tagger Output; #it{qr}-output ; Events'
564  )
565  histo_belleplotB0.SetMinimum(0)
566  histo_belleplotB0.SetMaximum(YmaxForQrPlot)
567  histo_belleplotB0.Draw('hist')
568  histo_belleplotB0bar.Draw('hist same')
569 
570  leg = ROOT.TLegend(0.75, 0.8, 0.9, 0.9)
571  leg.AddEntry(histo_belleplotB0, 'true B0')
572  leg.AddEntry(histo_belleplotB0bar, 'true B0bar')
573  leg.Draw()
574 
575  Canvas1.Update()
576  # IPython.embed()
577  with Quiet(ROOT.kError):
578  Canvas1.SaveAs(workingDirectory + '/' + 'PIC_Belleplot_both' + method + '.pdf')
579 
580  # produce the nice calibration plot
581  Canvas2 = ROOT.TCanvas('Bla2' + method, 'Calibration plot for true B0', 1200, 800)
582  Canvas2.cd() # activate
583  histo_calib_B0.SetFillColorAlpha(ROOT.kBlue, 0.2)
584  histo_calib_B0.SetFillStyle(1001)
585  histo_calib_B0.GetYaxis().SetTitleOffset(1.2)
586  histo_calib_B0.SetLineColor(ROOT.kBlue)
587 
588  histo_calib_B0.SetTitle('Calibration For True B0; #it{qr}-output ; Calibration '
589  )
590  histo_calib_B0.Draw('hist')
591  diag.Draw('SAME')
592  Canvas2.Update()
593  with Quiet(ROOT.kError):
594  Canvas2.SaveAs(workingDirectory + '/' + 'PIC_Calibration_B0' + method + '.pdf')
595 
596  histo_avr_r.Delete()
597  histo_avr_rB0.Delete()
598  histo_avr_rB0bar.Delete()
599  histo_ms_r.Delete()
600  histo_ms_rB0.Delete()
601  histo_ms_rB0bar.Delete()
602  histo_mc_NwB0.Delete()
603  histo_mc_NwB0bar.Delete()
604  histo_notTaggedEvents.Delete()
605  histo_entries_per_bin.Delete()
606  histo_entries_per_binB0.Delete()
607  histo_entries_per_binB0bar.Delete()
608  histo_Cnet_output_B0.Delete()
609  histo_Cnet_output_B0bar.Delete()
610  histo_belleplotB0.Delete()
611  histo_belleplotB0bar.Delete()
612  histo_calib_B0.Delete()
613  histo_calib_B0bar.Delete()
614  hallo12.Delete()
615  histo_m0.Delete()
616  histo_m1.Delete()
617  histo_m2.Delete()
618  Canvas1.Clear()
619  Canvas2.Clear()
620 
621  print(r'\begin{tabularx}{1\textwidth}{@{}r r r r r r r@{}}')
622  print(r'\hline')
623  print(r'$r$- Interval $\enskip$ & $\varepsilon_i\ $ & $\Delta\varepsilon_i\ $ & $w_i \pm \delta w_i\enskip\; $ ' +
624  r' & $\Delta w_i \pm \delta\Delta w_i $& $\varepsilon_{\text{eff}, i} \pm \delta\varepsilon_{\text{eff}, i}\enskip\, $ ' +
625  r' & $\Delta \varepsilon_{\text{eff}, i} \pm \delta\Delta \varepsilon_{\text{eff}, i}\enskip\, $\\ \hline\hline')
626  for i in range(1, r_size):
627  print('$ ' + '{:.3f}'.format(r_subsample[i - 1]) + ' - ' + '{:.3f}'.format(r_subsample[i]) + '$ & $'
628  '{: 6.1f}'.format(event_fractionTotal[i] * 100) + r'$ & $' +
629  '{: 7.2f}'.format(event_fractionDiff[i] * 100) + r'\;$ & $' +
630  '{: 7.2f}'.format(wvalue[i] * 100) + r" \pm " + '{:2.2f}'.format(wvalueUncertainty[i] * 100) + r'\enskip $ & $' +
631  '{: 7.2f}'.format(wvalueDiff[i] * 100) + r" \pm " +
632  '{:2.2f}'.format(wvalueDiffUncertainty[i] * 100) + r'\enskip $ & $' +
633  '{: 8.4f}'.format(iEffEfficiency[i] * 100) + # + '$ & $' +
634  r" \pm " + '{:2.4f}'.format(iEffEfficiencyUncertainty[i] * 100) + r'\, $ & $' +
635  '{: 6.4f}'.format(iDeltaEffEfficiency[i] * 100) + # +
636  r" \pm " + '{:2.4f}'.format(iDeltaEffEfficiencyUncertainty[i] * 100) +
637  r'\enskip\enskip $ \\ ')
638  print(r'\hline\hline')
639  print(r'\multicolumn{1}{r}{Total} & & \multicolumn{5}{r}{ $\varepsilon_\text{eff} = ' +
640  r'\sum_i \varepsilon_i \cdot \langle 1-2w_i\rangle^2 = ' +
641  '{: 6.2f}'.format(average_eff_eff * 100) + r" \pm " + '{: 6.2f}'.format(uncertainty_eff_effAverage * 100) + r'\enskip\, ')
642  print(r'\Delta \varepsilon_\text{eff} = ' +
643  '{: 6.2f}'.format(diff_eff * 100) + r" \pm " + '{: 6.2f}'.format(diff_eff_Uncertainty * 100) + r'\quad\ $ }' +
644  r' \\ ')
645  print(r'\hline')
646  print(r'\end{tabularx}')
647  print('')
648 
649  print('')
650  print('Mu-Values for Table')
651  print('')
652 
653  for i in range(1, r_size):
654  print('$ ' + '{:.3f}'.format(r_subsample[i - 1]) + ' - ' + '{:.3f}'.format(r_subsample[i]) + '$ & $'
655  '{: 7.2f}'.format(muParam[i] * 100) + r" \pm " + '{:2.2f}'.format(muParamUncertainty[i] * 100) + r' $ & ')
656 # **********************************************
657 # DETERMINATION OF INDIVIDUAL EFFECTIVE EFFICIENCY
658 # **********************************************
659 
660 # keep in mind:
661 # the individual efficiency is determined on the basis of the combiner training,
662 # whereas the total efficiency is determined on the basis of the final expert output.
663 # print(eventLevelParticleLists)
664 # eventLevelParticleLists.append(('K+:inRoe', 'KaonNotWeighted',
665 # 'QpOf(K+:inRoe, isRightCategory(Kaon), isRightCategory(Kaon))'))
666 # eventLevelParticleLists.append(('Lambda0:inRoe', 'LambdaNotWeighted',
667 # 'weightedQpOf(Lambda0:inRoe, isRightCategory(Lambda), isRightCategory(Lambda))'))
668 # eventLevelParticleLists.append(('e+:inRoe', 'Electron',
669 # 'weightedQpOf(e+:inRoe, isRightCategory(Electron), isRightCategory(Electron))'))
670 # eventLevelParticleLists.append(
671 # ('e+:inRoe',
672 # 'IntermediateElectron',
673 # 'weightedQpOf(e+:inRoe, isRightCategory(IntermediateElectron), isRightCategory(IntermediateElectron))'))
674 # eventLevelParticleLists.append(('mu+:inRoe', 'Muon',
675 # 'weightedQpOf(e+:inRoe, isRightCategory(Muon), isRightCategory(Muon))'))
676 # eventLevelParticleLists.append(('mu+:inRoe', 'IntermediateMuon',
677 # 'weightedQpOf(e+:inRoe, isRightCategory(IntermediateMuon), isRightCategory(IntermediateMuon))'))
678 # eventLevelParticleLists.append(('mu+:inRoe', 'KinLepton',
679 # 'weightedQpOf(e+:inRoe, isRightCategory(KinLepton), isRightCategory(KinLepton))'))
680 # eventLevelParticleLists.append(
681 # ('mu+:inRoe',
682 # 'IntermediateKinLepton',
683 # 'weightedQpOf(e+:inRoe, isRightCategory(IntermediateKinLepton), isRightCategory(IntermediateKinLepton))'))
684 # eventLevelParticleLists.append(('pi+:inRoe', 'SlowPion',
685 # 'weightedQpOf(e+:inRoe, isRightCategory(MaximumPstar), isRightCategory(MaximumPstar))'))
686 # eventLevelParticleLists.append(('pi+:inRoe', 'FSC',
687 # 'weightedQpOf(e+:inRoe, isRightCategory(SlowPion), isRightCategory(SlowPion))'))
688 # eventLevelParticleLists.append(('pi+:inRoe', 'MaximumPstar',
689 # 'weightedQpOf(e+:inRoe, isRightCategory(FSC), isRightCategory(SlowPion))'))
690 # eventLevelParticleLists.append(('pi+:inRoe', 'FastHadron',
691 # 'weightedQpOf(e+:inRoe, isRightCategory(FastHadron), isRightCategory(FastHadron))'))
692 # eventLevelParticleLists.append(('K+:inRoe', 'KaonPion',
693 # 'weightedQpOf(e+:inRoe, isRightCategory(KaonPion), isRightCategory(Kaon))'))
694 
695 
696 print(ft.eventLevelParticleLists)
697 
698 
699 print('******************************************* MEASURED EFFECTIVE EFFICIENCY FOR INDIVIDUAL CATEGORIES ' +
700  '**********************************************')
701 print('* ' +
702  ' *')
703 # input: classifier input from the event level. The event-level output is recalculated as input for the combiner level,
704 # but is re-evaluated under the combiner target. Signal is B0, background is B0bar.
705 categoriesPerformance = []
706 NbinsCategories = 100
707 for (particleList, category, combinerVariable) in ft.eventLevelParticleLists:
708  # histogram of the input variable (signal and background) - not yet a probability! It's a classifier plot!
709  hist_both = ROOT.TH1F('Both_' + category, 'Input Both (B0) ' +
710  category + ' (binning)', NbinsCategories, -1.0, 1.0)
711  # histogram of input variable (only signal) - not yet a probability! It's a classifier plot!
712  hist_signal = ROOT.TH1F('Signal_' + category, 'Input Signal (B0) ' +
713  category + ' (binning)', NbinsCategories, -1.0, 1.0)
714  # histogram of input variable (only background) - not yet a probability! It's a classifier plot!
715  hist_background = ROOT.TH1F('Background_' + category, 'Input Background (B0bar) ' +
716  category + ' (binning)', NbinsCategories, -1.0, 1.0)
717 
718  # by definition this input is not comparable to the network output, so it has to be transformed.
719  # probability output from 0 to 1 (corresponds to net output probability) -> calculation below
720  hist_probB0 = ROOT.TH1F('Probability' + category,
721  'Transformed to probability (B0) (' + category + ')',
722  NbinsCategories, 0.0, 1.0)
723  hist_probB0bar = ROOT.TH1F('ProbabilityB0bar_' + category,
724  'Transformed to probability (B0bar) (' + category + ')',
725  NbinsCategories, 0.0, 1.0)
726  # qp output from -1 to 1 -> transformation below
727  hist_qrB0 = ROOT.TH1F('QR' + category, 'Transformed to qp (B0)(' +
728  category + ')', NbinsCategories, -1.0, 1.0)
729  hist_qrB0bar = ROOT.TH1F('QRB0bar_' + category, 'Transformed to qp (B0bar) (' +
730  category + ')', NbinsCategories, -1.0, 1.0)
731  # histogram for abs(qp), i.e. this histogram contains the r-values -> transformation below
732  # also used to get the number of entries, sorted into the r bins
733  histo_entries_per_bin = ROOT.TH1F('entries_per_bin_' + category, 'Abs(qp)(B0) (' + category + ')', int(r_size - 2), r_subsample)
734  histo_entries_per_binB0 = ROOT.TH1F('entries_per_bin' + category, 'Abs(qp)(B0) (' +
735  category + ')', int(r_size - 2), r_subsample)
736  histo_entries_per_binB0bar = ROOT.TH1F('entries_per_binB0bar_' + category,
737  'Abs(qp) (B0bar) (' + category + ')', int(r_size - 2), r_subsample)
738 
739  # at the end this histogram contains the average r values -> calculation below
740  # sorted into r bins
741  hist_avr_rB0 = ROOT.TH1F('Average_r' + category, 'Average r for B0' +
742  category, int(r_size - 2), r_subsample)
743  hist_avr_rB0bar = ROOT.TH1F('Average_rB0bar_' + category, 'Average r for B0bar' +
744  category, int(r_size - 2), r_subsample)
745 
746  hist_ms_rB0 = ROOT.TH1F('AverageSqrdR' + category, 'Average r sqrd for B0' +
747  category, int(r_size - 2), r_subsample)
748  hist_ms_rB0bar = ROOT.TH1F('AverageSqrdRB0bar_' + category, 'Average r sqrd for B0bar' +
749  category, int(r_size - 2), r_subsample)
750 
751  # ****** TEST OF CALIBRATION ******
752  # for the calibration plot we need the distribution of all events and of the true B0 events
753  hist_all = ROOT.TH1F('All_' + category, 'Input Signal (B0) and Background (B0Bar)' +
754  category + ' (binning 50)', 50, 0.0, 1.0)
755  hist_calib_B0 = ROOT.TH1F('Calib_' + category, 'Calibration Plot for true B0' +
756  category + ' (binning 50)', 50, 0.0, 1.0)
757 
758  # fill both
759  if category != "KaonNotWeighted" and category != "LambdaNotWeighted":
760  # if category == 'Lambda' or combinerVariable.find('weighted') == -1:
761  tree.Draw('qp' + category + '>>Both_' + category, 'abs(qrMC) == 1.0')
762  # fill signal
763  tree.Draw('qp' + category + '>>Signal_' + category, 'qrMC == 1.0')
764  # fill background
765  tree.Draw('qp' + category + '>>Background_' + category, 'qrMC == -1.0')
766  tree.Draw('qp' + category + '>>All_' + category, 'qrMC!=0')
767  tree.Draw('qp' + category + '>>Calib_' + category, 'qrMC == 1.0')
768 
769  # elif combinerVariable.find('weighted') != -1:
770  # tree.Draw('extraInfo__boWeightedQpOf' + category + '__bc' + '>>Both_' + category, 'abs(qrMC) == 1.0')
771  # tree.Draw('extraInfo__boWeightedQpOf' + category + '__bc' + '>>Signal_' + category, 'qrMC == 1.0')
772  # tree.Draw('extraInfo__boWeightedQpOf' + category + '__bc' + '>>Background_' + category, 'qrMC == -1.0')
773  # tree.Draw('extraInfo__boWeightedQpOf' + category + '__bc' + '>>All_' + category, 'qrMC!=0')
774  # tree.Draw('extraInfo__boWeightedQpOf' + category + '__bc' + '>>Calib_' + category, 'qrMC == 1.0')
775  # category = category + 'W'
776 
777  elif category == "KaonNotWeighted":
778  tree.Draw('extraInfo__boQpOfKaon__bc' + '>>Both_' + category, 'abs(qrMC) == 1.0')
779  tree.Draw('extraInfo__boQpOfKaon__bc' + '>>Signal_' + category, 'qrMC == 1.0')
780  tree.Draw('extraInfo__boQpOfKaon__bc' + '>>Background_' + category, 'qrMC == -1.0')
781  tree.Draw('extraInfo__boQpOfKaon__bc' + '>>All_' + category, 'qrMC!=0')
782  tree.Draw('extraInfo__boQpOfKaon__bc' + '>>Calib_' + category, 'qrMC == 1.0')
783 
784  elif category == "LambdaNotWeighted":
785  tree.Draw('extraInfo__boQpOfLambda__bc' + '>>Both_' + category, 'abs(qrMC) == 1.0')
786  tree.Draw('extraInfo__boQpOfLambda__bc' + '>>Signal_' + category, 'qrMC == 1.0')
787  tree.Draw('extraInfo__boQpOfLambda__bc' + '>>Background_' + category, 'qrMC == -1.0')
788  tree.Draw('extraInfo__boQpOfLambda__bc' + '>>All_' + category, 'qrMC!=0')
789  tree.Draw('extraInfo__boQpOfLambda__bc' + '>>Calib_' + category, 'qrMC == 1.0')
790 
791  hist_calib_B0.Divide(hist_all)
792  # ****** produce the input plots from combiner level ******
793 
794  maxSignal = hist_signal.GetBinContent(hist_signal.GetMaximumBin())
795  maxBackground = hist_background.GetBinContent(hist_background.GetMaximumBin())
796 
797  Ymax = max(maxSignal, maxBackground)
798  Ymax = Ymax + Ymax / 12
799 
800  ROOT.gStyle.SetOptStat(0)
801  with Quiet(ROOT.kError):
802  Canvas = ROOT.TCanvas('Bla', 'Category input', 1200, 800)
803  Canvas.cd() # activate
804  Canvas.SetLogy()
805  hist_signal.SetFillColorAlpha(ROOT.kBlue, 0.2)
806  hist_signal.SetFillStyle(1001)
807  hist_signal.SetTitleSize(0.1)
808  hist_signal.GetXaxis().SetLabelSize(0.04)
809  hist_signal.GetYaxis().SetLabelSize(0.04)
810  hist_signal.GetXaxis().SetTitleSize(0.05)
811  hist_signal.GetYaxis().SetTitleSize(0.05)
812  hist_signal.GetXaxis().SetTitleOffset(0.95)
813  hist_signal.GetYaxis().SetTitleOffset(1.1)
814  hist_signal.GetYaxis().SetLimits(0, Ymax)
815  hist_signal.SetLineColor(ROOT.kBlue)
816  hist_background.SetFillColorAlpha(ROOT.kRed, 1.0)
817  hist_background.SetFillStyle(3005)
818  hist_background.GetYaxis().SetLimits(0, Ymax)
819  hist_background.SetLineColor(ROOT.kRed)
820 
821  hist_signal.SetTitle(category + ' category; #it{qp}-Output ; Events')
822  # hist_signal.SetMinimum(0)
823  hist_signal.SetMaximum(Ymax)
824  # hist_background.SetMinimum(0)
825  hist_background.SetMaximum(Ymax)
826 
827  hist_signal.Draw('hist')
828  hist_background.Draw('hist same')
829 
830  if category == 'MaximumPstar':
831  l = ROOT.TLegend(0.4, 0.75, 0.6, 0.9)
832  else:
833  l = ROOT.TLegend(0.6, 0.75, 0.8, 0.9)
834  l.AddEntry(hist_signal, 'true B0')
835  l.AddEntry(hist_background, 'true B0bar')
836  l.SetTextSize(0.05)
837  l.Draw()
838 
839  Canvas.Update()
840  with Quiet(ROOT.kError):
841  Canvas.SaveAs(workingDirectory + '/' + 'PIC_' + category + '_Input_Combiner.pdf')
842 
843  # ***** TEST OF CALIBRATION ******
844 
845  # initialize some arrays
846  binCounter = int(NbinsCategories + 1)
847  dilutionB02 = array('d', [0] * binCounter)
848  dilutionB0bar2 = array('d', [0] * binCounter)
849  purityB0 = array('d', [0] * binCounter)
850  purityB0bar = array('d', [0] * binCounter)
851  signal = array('d', [0] * binCounter)
852  back = array('d', [0] * binCounter)
853  weight = array('d', [0] * binCounter)
854 
855  for i in range(1, binCounter):
856  # doing the transformation to probabilities
857  signal[i] = hist_signal.GetBinContent(i)
858  back[i] = hist_background.GetBinContent(i)
859  weight[i] = signal[i] + back[i]
860 
861  # avoid dividing by zero
862  if signal[i] + back[i] == 0:
863  purityB0[i] = 0
864  dilutionB02[i] = 0
865  purityB0bar[i] = 0
866  dilutionB0bar2[i] = 0
867  else:
868 
869  purityB0[i] = signal[i] / (signal[i] + back[i])
870  dilutionB02[i] = -1 + 2 * signal[i] / (signal[i] + back[i])
871 
872  purityB0bar[i] = back[i] / (signal[i] + back[i])
873  dilutionB0bar2[i] = -1 + 2 * back[i] / (signal[i] + back[i])
874 
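# purity p = S/(S + B) per classifier-output bin and dilution q*r = 2*p - 1;
# the classifier output is thus mapped to a probability and to a qr-like value.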
875  # filling histogram with the probability from 0 to 1
876  hist_probB0.Fill(purityB0[i], signal[i])
877  hist_probB0bar.Fill(purityB0bar[i], back[i])
878 
879  # filling histogram with qr from -1 to 1
880  hist_qrB0.Fill(dilutionB02[i], signal[i])
881  hist_qrB0bar.Fill(dilutionB0bar2[i], back[i])
882 
883  # filling histogram with abs(qr), i.e. this histogram contains the r-values (not qp)
884  histo_entries_per_binB0.Fill(abs(dilutionB02[i]), signal[i])
885  histo_entries_per_binB0bar.Fill(abs(dilutionB0bar2[i]), back[i])
886  # filling histogram at abs(qr) with weight abs(qr) * entries - needed for the average r calculation
887  hist_avr_rB0.Fill(abs(dilutionB02[i]), abs(dilutionB02[i]) * signal[i])
888  hist_avr_rB0bar.Fill(abs(dilutionB0bar2[i]), abs(dilutionB0bar2[i]) * back[i])
889  # filling histogram at abs(qr) with weight qr**2 * entries - needed for the std dev of r calculation
890  hist_ms_rB0.Fill(abs(dilutionB02[i]), abs(dilutionB02[i] * dilutionB02[i]) * signal[i])
891  hist_ms_rB0bar.Fill(abs(dilutionB0bar2[i]), abs(dilutionB0bar2[i] * dilutionB0bar2[i]) * back[i])
892 
893  # hist_avr_rB0 and hist_avr_rB0bar now contain the average r-value
894  hist_avr_rB0.Divide(histo_entries_per_binB0)
895  hist_avr_rB0bar.Divide(histo_entries_per_binB0bar)
896 
897  hist_ms_rB0.Divide(histo_entries_per_binB0)
898  hist_ms_rB0bar.Divide(histo_entries_per_binB0bar)
899  # now calculating the efficiency
900 
901  # calculating number of events
902  total_entriesB0 = total_notTagged / 2
903  total_entriesB0bar = total_notTagged / 2
904  for i in range(1, r_size):
905  total_entriesB0 = total_entriesB0 + histo_entries_per_binB0.GetBinContent(i)
906  total_entriesB0bar = total_entriesB0bar + histo_entries_per_binB0bar.GetBinContent(i)
907  # initializing some arrays
908  tot_eff_effB0 = 0
909  tot_eff_effB0bar = 0
910  uncertainty_eff_effB0 = 0
911  uncertainty_eff_effB0bar = 0
912  uncertainty_eff_effAverage = 0
913  diff_eff_Uncertainty = 0
914  event_fractionB0 = array('f', [0] * r_size)
915  event_fractionB0bar = array('f', [0] * r_size)
916  rvalueB0 = array('f', [0] * r_size)
917  rvalueB0bar = array('f', [0] * r_size)
918  rvalueStdB0 = array('f', [0] * r_size)
919  rvalueStdB0bar = array('f', [0] * r_size)
920  # wvalue = array('f', [0] * r_size)
921  entriesBoth = array('f', [0] * r_size)
922  entriesB0 = array('f', [0] * r_size)
923  entriesB0bar = array('f', [0] * r_size)
924  iEffEfficiencyB0Uncertainty = array('f', [0] * r_size)
925  iEffEfficiencyB0barUncertainty = array('f', [0] * r_size)
926  iDeltaEffEfficiencyUncertainty = array('f', [0] * r_size)
927 
928  for i in range(1, r_size):
929 
930  entriesB0[i] = histo_entries_per_binB0.GetBinContent(i)
931  entriesB0bar[i] = histo_entries_per_binB0bar.GetBinContent(i)
932  entriesBoth[i] = entriesB0bar[i] + entriesB0[i]  # summed after both bin contents are read
933  event_fractionB0[i] = entriesB0[i] / total_entriesB0
934  event_fractionB0bar[i] = entriesB0bar[i] / total_entriesB0bar
935  # print '* Bin ' + str(i) + ' r-value: ' + str(rvalueB0[i]), 'entriesB0: ' +
936  # str(event_fractionB0[i] * 100) + ' % (' + str(entriesB0[i]) + '/' +
937  # str(total_entriesB0) + ')'
938 
939  rvalueB0[i] = hist_avr_rB0.GetBinContent(i)
940  rvalueB0bar[i] = hist_avr_rB0bar.GetBinContent(i)
941 
942  rvalueStdB0[i] = 0
943  rvalueStdB0bar[i] = 0
944 
945  if entriesB0[i] > 1:
946  rvalueStdB0[i] = math.sqrt(abs(hist_ms_rB0.GetBinContent(
947  i) - (hist_avr_rB0.GetBinContent(i))**2)) / math.sqrt(entriesB0[i] - 1)
948 
949  if entriesB0bar[i] > 1:
950  rvalueStdB0bar[i] = math.sqrt(abs(hist_ms_rB0bar.GetBinContent(
951  i) - (hist_avr_rB0bar.GetBinContent(i))**2)) / math.sqrt(entriesB0bar[i] - 1)
952  # wvalue[i] = (1 - rvalueB0[i]) / 2
953 
954  tot_eff_effB0 = tot_eff_effB0 + event_fractionB0[i] * rvalueB0[i] \
955  * rvalueB0[i]
956  tot_eff_effB0bar = tot_eff_effB0bar + event_fractionB0bar[i] * rvalueB0bar[i] \
957  * rvalueB0bar[i]
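# Here the per-category effective efficiency is built from the average dilution per bin,
# eps_eff = sum_i eps_i * <r_i>**2, instead of from counted wrong tags as in the combiner section above.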
958 
959  iEffEfficiencyB0Uncertainty[i] = rvalueB0[i] * \
960  math.sqrt((2 * total_entriesB0 * entriesB0[i] * rvalueStdB0[i])**2 +
961  rvalueB0[i]**2 * entriesB0[i] *
962  (total_entriesB0 * (total_entriesB0 - entriesB0[i]) +
963  entriesB0[i] * total_notTagged)) / (total_entriesB0**2)
964  iEffEfficiencyB0barUncertainty[i] = rvalueB0bar[i] * \
965  math.sqrt((2 * total_entriesB0bar * entriesB0bar[i] * rvalueStdB0bar[i])**2 +
966  rvalueB0bar[i]**2 * entriesB0bar[i] *
967  (total_entriesB0bar * (total_entriesB0bar - entriesB0bar[i]) +
968  entriesB0bar[i] * total_notTagged)) / (total_entriesB0bar**2)
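# Propagation of eps_i * <r_i>**2 using the uncertainty on <r_i> and a binomial uncertainty on the
# event fraction; the N_i * total_notTagged term accounts (approximately) for the untagged events.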
969 
970  iDeltaEffEfficiencyUncertainty[i] = math.sqrt(iEffEfficiencyB0Uncertainty[i]**2 + iEffEfficiencyB0barUncertainty[i]**2)
971 
972  diff_eff_Uncertainty = diff_eff_Uncertainty + iDeltaEffEfficiencyUncertainty[i]**2
973 
974  uncertainty_eff_effB0 = uncertainty_eff_effB0 + iEffEfficiencyB0Uncertainty[i]**2
975  uncertainty_eff_effB0bar = uncertainty_eff_effB0bar + iEffEfficiencyB0barUncertainty[i]**2
976 
977  effDiff = tot_eff_effB0 - tot_eff_effB0bar
978  effAverage = (tot_eff_effB0 + tot_eff_effB0bar) / 2
979 
980  uncertainty_eff_effB0 = math.sqrt(uncertainty_eff_effB0)
981  uncertainty_eff_effB0bar = math.sqrt(uncertainty_eff_effB0bar)
982  diff_eff_Uncertainty = math.sqrt(diff_eff_Uncertainty)
983  uncertainty_eff_effAverage = diff_eff_Uncertainty / 2
984  print(
985  '{:<25}'.format("* " + category) + ' B0-Eff=' +
986  '{: 8.2f}'.format(tot_eff_effB0 * 100) + " +-" + '{: 4.2f}'.format(uncertainty_eff_effB0 * 100) +
987  ' %' +
988  ' B0bar-Eff=' +
989  '{: 8.2f}'.format(tot_eff_effB0bar * 100) + " +-" + '{: 4.2f}'.format(uncertainty_eff_effB0bar * 100) +
990  ' %' +
991  ' EffAverage=' +
992  '{: 8.2f}'.format(effAverage * 100) + " +- " + '{:4.2f}'.format(uncertainty_eff_effAverage * 100) + ' %' +
993  ' EffDiff=' +
994  '{: 8.2f}'.format(effDiff * 100) + " +- " + '{:4.2f}'.format(diff_eff_Uncertainty * 100) + ' % *')
995 
996  # hist_signal.Write('', ROOT.TObject.kOverwrite)
997  # hist_background.Write('', ROOT.TObject.kOverwrite)
998  # hist_probB0.Write('', ROOT.TObject.kOverwrite)
999  # hist_probB0bar.Write('', ROOT.TObject.kOverwrite)
1000  # hist_qpB0.Write('', ROOT.TObject.kOverwrite)
1001  # hist_qpB0bar.Write('', ROOT.TObject.kOverwrite)
1002  # hist_absqpB0.Write('', ROOT.TObject.kOverwrite)
1003  # hist_absqpB0bar.Write('', ROOT.TObject.kOverwrite)
1004  # hist_avr_rB0.Write('', ROOT.TObject.kOverwrite)
1005  # hist_avr_rB0bar.Write('', ROOT.TObject.kOverwrite)
1006  # hist_all.Write('', ROOT.TObject.kOverwrite)
1007  # hist_calib_B0.Write('', ROOT.TObject.kOverwrite)
1008  categoriesPerformance.append((category, effAverage, uncertainty_eff_effAverage, effDiff, diff_eff_Uncertainty))
1009  with Quiet(ROOT.kError):
1010  Canvas.Clear()
1011 # if average_eff != 0:
1012  # print '* -------------------------------------------------------------------------'
1013  # print '* ' + '{: > 8.2f}'.format(average_eff * 100) + ' %' \
1014  # + '{:>85}'.format('TOTAL' + ' *')
1015 
1016 print('* ' +
1017  ' *')
1018 print('**************************************************************************************************************************' +
1019  '************************')
1020 print('')
1021 print('Table For B2TIP')
1022 print('')
1023 print(r'\begin{tabular}{ l r r }')
1024 print(r'\hline')
1025 print(r'Categories & $\varepsilon_\text{eff} \pm \delta\varepsilon_\text{eff} $& ' +
1026  r'$\Delta\varepsilon_\text{eff} \pm \delta\Delta\varepsilon_\text{eff}$\\ \hline\hline')
1027 for (category, effAverage, uncertainty_eff_effAverage, effDiff, diff_eff_Uncertainty) in categoriesPerformance:
1028  print(
1029  '{:<23}'.format(category) +
1030  ' & $' +
1031  '{: 6.2f}'.format(effAverage * 100) + r" \pm " + '{:4.2f}'.format(uncertainty_eff_effAverage * 100) +
1032  ' $ & $' +
1033  '{: 6.2f}'.format(effDiff * 100) + r" \pm " + '{:4.2f}'.format(diff_eff_Uncertainty * 100) +
1034  r'\ \enskip $ \\')
1035 print(r'\hline')
1036 print(r'\end{tabular}')
1037 B2INFO('qp Output Histograms in pdf format saved at: ' + workingDirectory)
1038 with open("categoriesPerformance.pkl", "wb") as f:
1039  pickle.dump(categoriesPerformance, f)