import copy
import json
import os
import re

from datetime import date
from typing import Dict, Union, List, Optional

# Assumed module locations for helpers referenced below:
# send_mail(name, recipient, subject, text, link=None, link_title=None,
# mood="normal") and get_html_plots_tag_comparison_json(output_base_dir, tags).
import mail_utils
import validationpath

from validationfunctions import get_latest_nightly
from validationscript import Script
def parse_mail_address(obj: Union[str, List[str]]) -> List[str]:
    """
    Take a string or list and return the list of email addresses that
    appear in it.
    """
    if isinstance(obj, str):
        return re.findall(r"[\w.-]+@[\w.-]+", obj)
    elif isinstance(obj, list):
        return [
            re.search(r"[\w.-]+@[\w.-]+", c).group()
            for c in obj
            if re.search(r"[\w.-]+@[\w.-]+", c) is not None
        ]
    else:
        raise TypeError("must be string or list of strings")
class Mail:
    """
    Provides functionality to send mails in case of failed scripts /
    validation plots.

    The mail data is built upon instantiation; the `send_all_mails` method
    sends the actual mails.
    """
    def __init__(self, validation, include_expert_plots=False):
        """
        Initializes an instance of the Mail class from an instance of the
        Validation class. Assumes that a comparison json file exists,
        reads it and parses it to extract information about failed plots.
        This information, together with information about failed scripts,
        gets stored in self.mail_data_new. If there is a mail_data.json inside
        the log folder, its contents get stored in self.mail_data_old for
        comparison.

        @param validation: validation.Validation instance
        @param include_expert_plots: Should expert plots be included?
        """
        #: Instance of validation.Validation
        self._validator = validation

        # Work folder of the validation run (attribute name assumed)
        work_folder = self._validator.work_folder

        revision = get_latest_nightly(work_folder)

        # Path to the comparison json of the reference and the latest nightly
        # revision (module providing the helper is assumed, see imports).
        comparison_json_file = validationpath.get_html_plots_tag_comparison_json(
            work_folder, ["reference", revision]
        )
        with open(comparison_json_file) as f:
            comparison_json = json.load(f)

        #: Yesterday's mail data (read from mail_data.json if available)
        self._mail_data_old = None
        old_mail_data_path = os.path.join(
            self._validator.get_log_folder(), "mail_data.json"
        )
        try:
            with open(old_mail_data_path) as f:
                self._mail_data_old = json.load(f)
        except FileNotFoundError:
            print(
                f"Could not find old mail_data.json at {old_mail_data_path}."
            )

        #: Current mail data
        self._mail_data_new = self._flag_new_failures(
            self._create_mail_log(
                comparison_json, include_expert_plots=include_expert_plots
            ),
            self._mail_data_old,
        )
    def _create_mail_log_failed_scripts(self) -> Dict[str, Dict[str, str]]:
        """
        Looks up all scripts that failed and collects information about them.
        See :meth:`_create_mail_log` for the structure of the resulting
        dictionary.
        """
        # Get the list of failed scripts from the log folder.
        with open(
            os.path.join(
                self._validator.get_log_folder(),
                "list_of_failed_scripts.log",
            )
        ) as f:
            failed_scripts = f.read().splitlines()

        # Collect the contacts of the failed scripts.
        mail_log = {}
        for failed_script in failed_scripts:
            # The validator stores scripts under their sanitized names.
            failed_script = Script.sanitize_file_name(failed_script)
            script = self._validator.get_script_by_name(failed_script)

            # Build an entry with the same keys as the plot entries in
            # _create_mail_log; there is no comparison for a failed script,
            # so dummy values are used where necessary.
            failed_script = {}
            failed_script["warnings"] = []
            failed_script["package"] = script.package
            failed_script["rootfile"] = ", ".join(script.input_files)
            failed_script["comparison_text"] = " -- "
            failed_script["description"] = script.description
            failed_script["comparison_result"] = "script failed to execute"
            failed_script["file_url"] = os.path.join(
                # (leading path components elided in the original)
                script.name_not_sanitized,
            )

            try:
                for contact in parse_mail_address(script.contact):
                    if contact not in mail_log:
                        mail_log[contact] = {}
                    mail_log[contact][script.name] = failed_script
            except (KeyError, TypeError):
                # Script has no usable contact information.
                continue

        return mail_log
    def _create_mail_log(
        self, comparison, include_expert_plots=False
    ) -> Dict[str, Dict[str, Dict[str, str]]]:
        """
        Takes the entire comparison json file, finds all the plots where
        comparison failed, finds info about failed scripts and saves them in
        the following format:

            {
                "email@address.test": {
                    "title of plot or script": {
                        "package": str,
                        "rootfile": str,
                        "comparison_text": str,
                        "description": str,
                        "comparison_result": str,
                        "warnings": list,
                        "file_url": str,
                    },
                    ...
                },
                ...
            }

        The top level ordering is the email address of the contact to make
        sure every user gets only one mail with everything in it.
        """
        mail_log = {}
        for package in comparison["packages"]:
            for plotfile in package["plotfiles"]:
                for plot in plotfile["plots"]:
                    if not include_expert_plots and plot["is_expert"]:
                        continue
                    # Report a plot if its comparison failed or if it carries
                    # warnings other than the benign "No reference object".
                    skip = True
                    if plot["comparison_result"] in ["error"]:
                        skip = False
                    if set(plot["warnings"]) - {"No reference object"}:
                        skip = False
                    if skip:
                        continue

                    error_data = {
                        "package": plotfile["package"],
                        "rootfile": plotfile["rootfile"],
                        "comparison_text": plot["comparison_text"],
                        "description": plot["description"],
                        "comparison_result": plot["comparison_result"],
                        "warnings": sorted(
                            set(plot["warnings"]) - {"No reference object"}
                        ),
                        "file_url": os.path.join(
                            # (path components elided in the original)
                            ...
                        ),
                    }
                    for contact in parse_mail_address(plot["contact"]):
                        if contact not in mail_log:
                            mail_log[contact] = {}
                        mail_log[contact][plot["title"]] = error_data

        # Merge in the scripts that failed to execute at all.
        failed_scripts = self._create_mail_log_failed_scripts()
        for contact in failed_scripts:
            if contact not in mail_log:
                mail_log[contact] = failed_scripts[contact]
            else:
                for script in failed_scripts[contact]:
                    mail_log[contact][script] = failed_scripts[contact][script]

        return mail_log
    @staticmethod
    def _flag_new_failures(
        mail_log: Dict[str, Dict[str, Dict[str, str]]],
        old_mail_log: Optional[Dict[str, Dict[str, Dict[str, str]]]],
    ) -> Dict[str, Dict[str, Dict[str, str]]]:
        """ Add a new field 'compared_to_yesterday' which takes one of the
        values 'unchanged' (same revision comparison result as in yesterday's
        mail log), 'new' (new warning/failure), 'changed' (comparison result
        or warnings changed).
        """
        mail_log_flagged = copy.deepcopy(mail_log)
        for contact in mail_log:
            for plot in mail_log[contact]:
                if old_mail_log is None:
                    mail_log_flagged[contact][plot][
                        "compared_to_yesterday"
                    ] = "new"
                elif contact not in old_mail_log:
                    mail_log_flagged[contact][plot][
                        "compared_to_yesterday"
                    ] = "new"
                elif plot not in old_mail_log[contact]:
                    mail_log_flagged[contact][plot][
                        "compared_to_yesterday"
                    ] = "new"
                elif (
                    mail_log[contact][plot]["comparison_result"]
                    != old_mail_log[contact][plot]["comparison_result"]
                    or mail_log[contact][plot]["warnings"]
                    != old_mail_log[contact][plot]["warnings"]
                ):
                    mail_log_flagged[contact][plot][
                        "compared_to_yesterday"
                    ] = "changed"
                else:
                    mail_log_flagged[contact][plot][
                        "compared_to_yesterday"
                    ] = "unchanged"
        return mail_log_flagged
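
    # A minimal illustration of _flag_new_failures above, with hypothetical
    # data (not part of the original file):
    #
    #   old = {"a@example.org": {"plotX": {"comparison_result": "error",
    #                                      "warnings": []}}}
    #   new = {"a@example.org": {"plotX": {"comparison_result": "error",
    #                                      "warnings": ["shifted mean"]},
    #                            "plotY": {"comparison_result": "error",
    #                                      "warnings": []}}}
    #   flagged = Mail._flag_new_failures(new, old)
    #   # flagged["a@example.org"]["plotX"]["compared_to_yesterday"] == "changed"
    #   # flagged["a@example.org"]["plotY"]["compared_to_yesterday"] == "new"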
    @staticmethod
    def _check_if_same(plot_errors: Dict[str, Dict[str, str]]) -> bool:
        """ Check whether anything changed with respect to yesterday's report.
        @param plot_errors: ``_create_mail_log[contact]``.
        @return True, if there is at least one new/changed plot status
        """
        for plot in plot_errors:
            if plot_errors[plot]["compared_to_yesterday"] != "unchanged":
                return True
        return False
    @staticmethod
    def _compose_message(plots, incremental=True):
        """
        Takes a dict (like in _create_mail_log) and composes a mail body.

        @param incremental (bool): Is this an incremental report or a full
            report?
        """
        url = "https://validation.belle2.org/static/validation.html"

        if incremental:
            body = (
                "You are receiving this email, because additional "
                "validation plots/scripts (that include you as contact "
                "person) produced warnings/errors or "
                "because their warning/error status "
                "changed.\n"
                "Below is a detailed list of all new/changed offenders:\n\n"
            )
        else:
            body = (
                "This is a full list of validation plots/scripts that "
                "produced warnings/errors and include you as contact "
                "person (sent out once a week).\n\n"
            )
        body += (
            "There were problems with the validation of the "
            "following plots/scripts:\n\n"
        )

        for plot in plots:
            compared_to_yesterday = plots[plot]["compared_to_yesterday"]
            if compared_to_yesterday == "unchanged":
                if incremental:
                    # Unchanged offenders are only listed in full reports.
                    continue
                body_plot = ""
            elif compared_to_yesterday == "new":
                body_plot = '<b style="color: red;">[NEW]</b><br>'
            elif compared_to_yesterday == "changed":
                body_plot = (
                    '<b style="color: red;">'
                    "[Warnings/comparison CHANGED]</b><br>"
                )
            else:
                body_plot = (
                    '<b style="color: red;">[UNEXPECTED compared_to_yesterday '
                    f'flag: "{compared_to_yesterday}". Please alert the '
                    "validation maintainer.]</b><br>"
                )

            if plots[plot]["comparison_result"] == "error":
                errormsg = "comparison unequal"
            elif plots[plot]["comparison_result"] == "not_compared":
                errormsg = ""
            else:
                errormsg = plots[plot]["comparison_result"]

            body_plot += "<b>{plot}</b><br>"
            body_plot += "<b>Package:</b> {package}<br>"
            body_plot += "<b>Rootfile:</b> {rootfile}.root<br>"
            body_plot += "<b>Description:</b> {description}<br>"
            body_plot += "<b>Comparison:</b> {comparison_text}<br>"
            if errormsg:
                body_plot += f"<b>Error:</b> {errormsg}<br>"
            warnings_str = ", ".join(plots[plot]["warnings"]).strip()
            if warnings_str:
                body_plot += f"<b>Warnings:</b> {warnings_str}<br>"
            body_plot += (
                "<b>Error plot/log file:</b> "
                "<a href='{file_url}'>Click me</a><br>"
            )

            body_plot = body_plot.format(
                plot=plot,
                package=plots[plot]["package"],
                rootfile=plots[plot]["rootfile"],
                description=plots[plot]["description"],
                comparison_text=plots[plot]["comparison_text"],
                file_url=url.split("static")[0] + plots[plot]["file_url"],
            )
            body += body_plot + "\n"

        body += (
            "You can take a look at the plots/scripts "
            f'<a href="{url}">here</a>.'
        )

        return body
399 """ Should a full (=non incremental) report be sent?
400 Use case e.g.: Send a full report every Monday.
402 is_monday = date.today().weekday() == 0
404 print(
"Forcing full report because today is Monday.")
    def send_all_mails(self, incremental=None):
        """
        Send mails to all contacts in self.mail_data_new. If
        self.mail_data_old is given, a mail is only sent if there are new
        or changed offenders.

        @param incremental: True/False/None (=automatic). Whether to send a
            full or incremental report.
        """
        if incremental is None:
            incremental = not self._force_full_report()
        if not incremental:
            print("Sending full ('Monday') report.")
        else:
            print("Sending incremental report.")

        recipients = []
        for contact in self._mail_data_new:
            # For incremental reports, skip contacts whose offenders are all
            # unchanged compared to yesterday (the exact selection logic was
            # partly elided in the original).
            if incremental and not self._check_if_same(
                self._mail_data_new[contact]
            ):
                continue
            recipients.append(contact)

            body = self._compose_message(
                self._mail_data_new[contact], incremental=incremental
            )
            if incremental:
                header = "Validation: New/changed warnings/errors"
            else:
                header = "Validation: Monday report"

            # The original's mood selection was elided; the helper's default
            # mood is assumed here.
            mood = "normal"
            mail_utils.send_mail(
                contact.split("@")[0], contact, header, body, mood=mood
            )

        # Contacts that appeared in yesterday's mail data but have no
        # offenders today get a short confirmation mail (condition partly
        # elided in the original).
        if self._mail_data_old is not None:
            for contact in self._mail_data_old:
                if contact in self._mail_data_new:
                    continue
                recipients.append(contact)
                body = "Your validation plots work fine now!"
                mail_utils.send_mail(
                    contact.split("@")[0],
                    contact,
                    "Validation confirmation",
                    body,
                )

        recipient_string = "\n".join([f"* {r}" for r in recipients])
        print(f"Sent mails to the following people: \n{recipient_string}\n")

        # Persist today's mail data so the next run can compare against it
        # (surrounding lines partly elided in the original).
        with open(
            os.path.join(self._validator.get_log_folder(), "mail_data.json"),
            "w",
        ) as f:
            json.dump(self._mail_data_new, f)