# Belle II Software development — test_full_stack.py
1#!/usr/bin/env python3
2
3
10
11# standard
12import socket
13import sys
14import subprocess
15import tempfile
16import os
17import time
18import requests
19import splinter
20import traceback
21
22# ours
23from b2test_utils import is_ci
24import validationpath
25from validationtestutil import check_execute
26
27# url of the local validation webserver
28validation_url = None # will be set at runtime
29
30
def http_post(command, json_args, retries=10, delay=1.0):
    """
    POST ``json_args`` as JSON to the validation server endpoint ``command``.

    Connection failures are retried up to ``retries`` times with ``delay``
    seconds between attempts; a reachable server that answers with a non-OK
    status is retried immediately.

    :param command: endpoint path appended to the module-level ``validation_url``
    :param json_args: dict sent as the JSON request body
    :param retries: number of attempts before giving up
    :param delay: seconds to sleep after a failed connection attempt
    :return: the ``requests`` response on success, ``None`` otherwise
    """
    target = validation_url + command
    print(f"Posting {json_args} to {command}")
    for i in range(retries):
        try:
            response = requests.post(target, json=json_args, timeout=5)
        except requests.exceptions.ConnectionError:
            # server may still be starting up — wait and retry
            print(f"Server not reachable yet (attempt {i+1}/{retries})")
            time.sleep(delay)
        else:
            if response.ok:
                return response
    print(f"Failed to reach server at {target}")
    return None
44
45
def wait_for_port_any(port: int, timeout: float = 30.0):
    """Wait until something listens on either 127.0.0.1 or ::1.

    Both loopback addresses are probed in turn every 0.1 s until one of
    them accepts a TCP connection or ``timeout`` seconds have elapsed.

    :param port: TCP port to probe
    :param timeout: maximum number of seconds to keep polling
    :return: the host string that accepted the connection, or ``None`` on timeout
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        for candidate in ("127.0.0.1", "::1"):
            try:
                conn = socket.create_connection((candidate, port), timeout=1)
            except OSError:
                continue
            conn.close()
            # report which address family actually worked
            return candidate
        time.sleep(0.1)
    return None
58
59
def check_for_plotting(revs, tmp_folder):
    """
    Checks if creating new plots for a revision combination works.

    Triggers a comparison on the validation server, polls its progress
    endpoint until the server reports completion (or a timeout expires),
    and finally verifies the expected output files exist on disk.

    :param revs: List of revisions
    :type revs: List[str]
    :param tmp_folder: Temporary folder holding the validation output
    :return: True if the comparison was created and is present on disk
    """

    print(f"Trying to recreate plots for revisions {revs}")

    res = http_post("create_comparison", {"revision_list": revs})
    if not res:
        return False

    # will be used to check on the progress
    prog_key = res.json()["progress_key"]

    # check the plot status with the webserver and only exit after a timeout
    # or if the plot combination has been created

    # 10 retries × 1 s delay per http_post -> 10 s worst-case
    wait_time = 1.0  # in seconds
    max_wait_time = 15
    start = time.time()
    while True:
        res = http_post("check_comparison_status", {"input": prog_key})
        if not res:
            return False

        if res.json():
            if res.json()["status"] == "complete":
                # plots are done
                break

        time.sleep(wait_time)
        if time.time() - start > max_wait_time:
            print(
                f"Waited for {max_wait_time} seconds for the requested plots to complete and nothing happened"
            )
            return False

    # check if the plots are really present
    # (fix: the assignments to comp_folder/comp_json had been truncated —
    # restored the validationpath helper calls that produce these paths)
    comp_folder = validationpath.get_html_plots_tag_comparison_folder(
        tmp_folder, revs
    )
    comp_json = validationpath.get_html_plots_tag_comparison_json(
        tmp_folder, revs
    )

    if not os.path.exists(comp_folder):
        print(f"Comparison folder {comp_folder} does not exist")
        return False
    if not os.path.isfile(comp_json):
        print(f"Comparison json {comp_json} does not exist")
        return False

    # just check for one random plot
    some_plot = os.path.join(
        comp_folder,
        "validation-test",
        "validationTestPlots_gaus_histogram.pdf",
    )
    if not os.path.isfile(some_plot):
        print(f"Comparison plot {some_plot} does not exist")
        return False

    print("Comparison properly created")
    return True
128
129
def check_for_content(revs, min_matrix_plots, min_plot_objects):
    """
    Checks for the expected content on the validation website.

    Opens the validation page in a headless Firefox and verifies that all
    requested revisions are listed and that minimum numbers of plot
    objects and overview matrix plots are shown.

    :param revs: revision labels expected to appear on the page
    :param min_matrix_plots: minimum number of matrix plots expected
    :param min_plot_objects: minimum number of plot objects expected
    :return: True if every check passed, False otherwise
    """
    with splinter.Browser("firefox", headless=True) as browser:
        # Visit URL
        url = validation_url + "static/validation.html"
        print(f"Opening {url} to perform checks")
        browser.visit(url)

        if not browser.title:
            print("Validation website cannot be loaded")
            return False

        found_revs = browser.find_by_css(".revision-label")
        shown_labels = [web_r.value for web_r in found_revs]

        for r in revs:
            if r not in shown_labels:
                print(
                    f"Revision {r} was not found on validation website. It should be there."
                )
                return False

        plot_objects = browser.find_by_css(".object")

        print(f"Checking for a minimum number of {min_plot_objects} plot objects")
        if len(plot_objects) < min_plot_objects:
            print(
                f"Only {len(plot_objects)} plots found, while {min_plot_objects} are expected"
            )
            return False

        # click the overview check box
        checkbox_overview = browser.find_by_id("check_show_overview")
        # todo: does not work yet, checkbox is directly unchecked again
        checkbox_overview.check()

        found_matrix_plots = browser.find_by_css(".plot_matrix_item")
        if len(found_matrix_plots) < min_matrix_plots:
            print(f"Only {len(found_matrix_plots)} matrix plots found, while {min_matrix_plots} are expected")
            return False

        return True
174
175
def main():
    """
    Runs two test validations, starts the web server and queries data.

    Generates validation results for a set of test revisions in a
    temporary directory, starts ``b2validation-server``, and runs the
    website/plotting checks against it. Exits with status 1 on failure.
    """

    success = True

    revs_to_gen = ["stack_test_1", "stack_test_2", "stack_test_3"]

    # create a temporary test folder in order not to interfere with
    # already existing validation results
    with tempfile.TemporaryDirectory() as tmpdir:

        # switch to this folder
        os.chdir(str(tmpdir))

        for r in revs_to_gen:
            check_execute(f"b2validation -p 4 --test --tag {r}")

        # fix: initialize before the try block — if both Popen calls below
        # raise, the finally clause would otherwise hit a NameError that
        # masks the real startup failure
        server_process = None

        # make sure the webserver process is terminated in any case
        try:
            # start webserver to serve json output files, plots and
            # interactive website
            # try IPv6 first; if that fails, fall back to IPv4.
            try:
                server_process = subprocess.Popen(["b2validation-server", "--ip", "::"])
            except OSError:
                server_process = subprocess.Popen(["b2validation-server", "--ip", "127.0.0.1"])

            # wait for up to 30 seconds for the server to start
            active_host = wait_for_port_any(8000, timeout=30)
            if not active_host:
                print("Validation server did not start within 30 s")
                server_process.terminate()
                sys.exit(1)

            # build URL from the address that really worked
            global validation_url
            if ":" in active_host:
                validation_url = f"http://[{active_host}]:8000/"
            else:
                validation_url = f"http://{active_host}:8000/"

            # check the content of the webserver if not running in GitLab pipeline
            if not is_ci():
                success = success and check_for_content(
                    revs_to_gen + ["reference"], 7, 7
                )

            # check if triggering the plot creation works
            success = success and check_for_plotting(
                revs_to_gen[:-1], str(tmpdir)
            )
        except BaseException:
            # catch any exceptions so the finally block can terminate the
            # webserver process properly
            e = sys.exc_info()[0]
            # print exception again
            print(f"Error {e}")
            print(traceback.format_exc())
            success = False
        finally:
            if server_process is not None:
                # send terminate command
                server_process.terminate()
                # wait for the webserver to actually terminate
                server_process.wait()

    if not success:
        sys.exit(1)
245
246
# run the full-stack test only when executed as a script
if __name__ == "__main__":
    main()
# Reference — validationpath helper signatures used in check_for_plotting:
#   get_html_plots_tag_comparison_json(output_base_dir, tags)
#   get_html_plots_tag_comparison_folder(output_base_dir, tags)