Belle II Software development
test_full_stack.py
#!/usr/bin/env python3


# standard
import sys
import subprocess
import tempfile
import os
import time
import requests
import traceback

# ours
import validationserver
import validationpath
from validationtestutil import check_execute

# url of the local validation webserver
validation_url = "http://localhost:8000/"


def start_webserver():
    """
    Start the validation server process, this will not
    return. Therefore this function must be started within
    a new subprocess
    """
    validationserver.run_server()


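# Illustrative sketch only: because start_webserver() blocks, it has to be
# launched in a separate process, e.g. with the standard library
# multiprocessing module (main() below instead spawns the
# run_validation_server command line tool via subprocess):
#
#   server = multiprocessing.Process(target=start_webserver)
#   server.start()
#   ...  # perform the checks against http://localhost:8000/
#   server.terminate()
#   server.join()

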
def http_post(command, json_args):
    call_url = validation_url + command
    print(f"Posting {json_args} to {command}")
    r = requests.post(call_url, json=json_args)
    if not r.ok:
        print(
            f"REST call {call_url} with arguments {json_args} failed"
        )
        print(str(r))
        return None

    return r

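
# Example (illustrative only; these mirror the calls made in
# check_for_plotting below): the helper takes the endpoint name and a JSON
# payload, e.g.
#
#   http_post("create_comparison", {"revision_list": ["rev1", "rev2"]})
#   http_post("check_comparison_status", {"input": progress_key})
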

def check_for_plotting(revs, tmp_folder):
    """
    Checks if creating new plots for a revision combination works
    :param revs: List of revisions
    :type revs: List[str]
    :param tmp_folder: Temporary folder
    """

    print(f"Trying to recreate plots for revisions {revs}")

    res = http_post("create_comparison", {"revision_list": revs})
    if not res:
        return False

    # will be used to check on the progress
    prog_key = res.json()["progress_key"]

    done = False
    wait_time = 0.1  # in seconds
    max_wait_time = 3
    summed_wait_time = 0

    # check the plot status with the webserver and only exit after a timeout
    # or if the plot combination has been created
    while not done:
        res = http_post("check_comparison_status", {"input": prog_key})
        if not res:
            return False

        if res.json():
            if res.json()["status"] == "complete":
                # plots are done
                break

        time.sleep(wait_time)
        summed_wait_time += wait_time
        if summed_wait_time > max_wait_time:
            print(
                f"Waited for {summed_wait_time} seconds for the requested plots to complete and nothing happened"
            )
            return False

    # check if the plots are really present
    comp_folder = validationpath.get_html_plots_tag_comparison_folder(
        tmp_folder, revs
    )
    comp_json = validationpath.get_html_plots_tag_comparison_json(
        tmp_folder, revs
    )

    if not os.path.exists(comp_folder):
        print(f"Comparison folder {comp_folder} does not exist")
        return False
    if not os.path.isfile(comp_json):
        print(f"Comparison json {comp_json} does not exist")
        return False

    # just check for one random plot
    some_plot = os.path.join(
        comp_folder,
        "validation-test",
        "validationTestPlotsB_gaus_histogram.pdf",
    )
    if not os.path.isfile(some_plot):
        print(f"Comparison plot {some_plot} does not exist")
        return False

    print("Comparison properly created")
    return True


def check_for_content(revs, min_matrix_plots, min_plot_objects):
    """
    Checks for the expected content on the validation website
    """
    try:
        import splinter
    except ImportError:
        print(
            "The splinter package is required to run this test. Run 'pip3 "
            "install splinter' to install"
        )
        # don't give an error exit code here to not fail if splinter is not
        # available
        return True

    with splinter.Browser() as browser:
        # Visit URL
        url = validation_url + "static/validation.html"
        print(f"Opening {url} to perform checks")
        browser.visit(url)

        if len(browser.title) == 0:
            print("Validation website cannot be loaded")
            return False

        found_revs = browser.find_by_css(".revision-label")

        for r in revs:
            rr = [web_r for web_r in found_revs if web_r.value == r]
            if len(rr) == 0:
                print(
                    f"Revision {r} was not found on validation website. It should be there."
                )
                return False

        plot_objects = browser.find_by_css(".object")

        print(
            f"Checking for a minimum number of {min_plot_objects} plot objects"
        )
        if len(plot_objects) < min_plot_objects:
            print(
                f"Only {len(plot_objects)} plots found, while {min_plot_objects} are expected"
            )
            return False

        # click the overview check box
        checkbox_overview = browser.find_by_id("check_show_overview")
        # todo: does not work yet, checkbox is directly unchecked again
        checkbox_overview.check()
        # found_matrix_plots = browser.find_by_css(".plot_matrix_item")

        # if len(found_matrix_plots) < min_matrix_plots:
        #     print("Only {} matrix plots found, while {} are expected".format(len(found_matrix_plots), min_matrix_plots))
        #     return False

    return True


def main():
    """
    Runs three test validations, starts the web server and queries data
    """

    # fixme: See if we can re-enable this test or at least run it locally
    print("TEST SKIPPED: Not properly runnable on build bot", file=sys.stderr)
    sys.exit(1)
    # noinspection PyUnreachableCode

    # only run the test on dev machines with splinter installed. Also for the
    # tests which don't use splinter, there are currently some connection
    # problems to the test webserver on the central build system
    try:
        import splinter  # noqa

        pass
    except ImportError:
        print(
            "TEST SKIPPED: The splinter package is required to run this test. "
            + "Run 'pip3 install splinter' to install",
            file=sys.stderr,
        )
        sys.exit(1)

    success = True

    revs_to_gen = ["stack_test_1", "stack_test_2", "stack_test_3"]

    # create a temporary test folder in order not to interfere with
    # already existing validation results
    with tempfile.TemporaryDirectory() as tmpdir:

        # switch to this folder
        os.chdir(str(tmpdir))

        for r in revs_to_gen:
            check_execute(f"validate_basf2 --test --tag {r}")

        # make sure the webserver process is terminated in any case
        try:
            # start webserver to serve json output files, plots and
            # interactive website
            server_process = subprocess.Popen(["run_validation_server"])

            # wait for two seconds for the server to start
            time.sleep(2)
            # check the content of the webserver, will need splinter
            success = success and check_for_content(
                revs_to_gen + ["reference"], 7, 7
            )

            # check if the plot creation triggering works
            success = success and check_for_plotting(
                revs_to_gen[:-1], str(tmpdir)
            )
        except BaseException:
            # catch any exceptions so the finally block can terminate the
            # webserver process properly
            e = sys.exc_info()[0]
            # print exception again
            print(f"Error {e}")
            print(traceback.format_exc())
            success = False
        finally:
            # send terminate command
            server_process.terminate()
            # wait for the webserver to actually terminate
            server_process.wait()

    if not success:
        sys.exit(1)


if __name__ == "__main__":
    main()
Referenced definitions:
    validationpath.get_html_plots_tag_comparison_json(output_base_dir, tags)
    validationpath.get_html_plots_tag_comparison_folder(output_base_dir, tags)
    validationserver.run_server(ip="127.0.0.1", port=8000, parse_command_line=False, open_site=False, dry_run=False)
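
The run_server entry point listed above can also be invoked directly from Python to serve existing validation results, for example (a minimal sketch; assumes a set-up basf2 environment and validation output in the current working directory):

    import validationserver

    # Serve the local validation results on localhost:8000 without
    # opening a browser window.
    validationserver.run_server(ip="127.0.0.1", port=8000, open_site=False)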