Belle II Software development
test_full_stack.py
#!/usr/bin/env python3

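"""
Full-stack test of the validation tools: run test validations for a few
revisions, start the validation web server and check both the served website
content and the plot creation REST interface.
"""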
# standard
import sys
import subprocess
import tempfile
import os
import time
import requests
import traceback

# ours
import b2test_utils
import validationserver
import validationpath
from validationtestutil import check_execute

# url of the local validation webserver
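# this has to match the address the validation server listens on
# (run_server defaults to 127.0.0.1:8000)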
validation_url = "http://localhost:8000/"


def start_webserver():
    """
    Start the validation server process. This will not return,
    therefore this function must be started within a new subprocess.
    """
    validationserver.run_server()


def http_post(command, json_args):
    """
    Send a POST request to the validation server.
    :param command: REST endpoint to call, relative to the server url
    :param json_args: dictionary sent as the JSON payload
    :return: the response object, or None if the call failed
    """
    call_url = validation_url + command
    print(f"Posting {json_args} to {command}")
    r = requests.post(call_url, json=json_args)
    if not r.ok:
        print(
            f"REST call {call_url} with arguments {json_args} failed"
        )
        print(str(r))
        return None

    return r


def check_for_plotting(revs, tmp_folder):
    """
    Checks if creating new plots for a revision combination works
    :param revs: List of revisions
    :type revs: List[str]
    :param tmp_folder: Temporary folder
    """

    print(f"Trying to recreate plots for revisions {revs}")

    res = http_post("create_comparison", {"revision_list": revs})
    if not res:
        return False

    # will be used to check on the progress
    prog_key = res.json()["progress_key"]

    done = False
    wait_time = 0.1  # in seconds
    max_wait_time = 3
    summed_wait_time = 0

    # check the plot status with the webserver and only exit after a timeout
    # or if the plot combination has been created
    while not done:
        res = http_post("check_comparison_status", {"input": prog_key})
        if not res:
            return False

        if res.json():
            if res.json()["status"] == "complete":
                # plots are done
                break

        time.sleep(wait_time)
        summed_wait_time += wait_time
        if summed_wait_time > max_wait_time:
            print(
                f"Waited for {summed_wait_time} seconds for the requested plots to complete and nothing happened"
            )
            return False

    # check if the plots are really present
    comp_folder = validationpath.get_html_plots_tag_comparison_folder(
        tmp_folder, revs
    )
    comp_json = validationpath.get_html_plots_tag_comparison_json(
        tmp_folder, revs
    )

    if not os.path.exists(comp_folder):
        print(f"Comparison folder {comp_folder} does not exist")
        return False
    if not os.path.isfile(comp_json):
        print(f"Comparison json {comp_json} does not exist")
        return False

    # just check for one random plot
    some_plot = os.path.join(
        comp_folder,
        "validation-test",
        "validationTestPlotsB_gaus_histogram.pdf",
    )
    if not os.path.isfile(some_plot):
        print(f"Comparison plot {some_plot} does not exist")
        return False

    print("Comparison properly created")
    return True


def check_for_content(revs, min_matrix_plots, min_plot_objects):
    """
    Checks for the expected content on the validation website
    """
    try:
        import splinter
    except ImportError:
        print(
            "The splinter package is required to run this test. Run 'pip3 "
            "install splinter' to install"
        )
        # don't give an error exit code here to not fail if splinter is not
        # available
        return True

    with splinter.Browser() as browser:
        # Visit URL
        url = validation_url + "static/validation.html"
        print(f"Opening {url} to perform checks")
        browser.visit(url)

        if len(browser.title) == 0:
            print("Validation website cannot be loaded")
            return False

        found_revs = browser.find_by_css(".revision-label")

        for r in revs:
            rr = [web_r for web_r in found_revs if web_r.value == r]
            if len(rr) == 0:
                print(
                    f"Revision {r} was not found on validation website. It should be there."
                )
                return False

        plot_objects = browser.find_by_css(".object")

        print(f"Checking for a minimum number of {min_plot_objects} plot objects")
        if len(plot_objects) < min_plot_objects:
            print(
                f"Only {len(plot_objects)} plots found, while {min_plot_objects} are expected"
            )
            return False

        # click the overview check box
        checkbox_overview = browser.find_by_id("check_show_overview")
        # todo: does not work yet, checkbox is directly unchecked again
        checkbox_overview.check()
        # found_matrix_plots = browser.find_by_css(".plot_matrix_item")

        # if len(found_matrix_plots) < min_matrix_plots:
        #     print("Only {} matrix plots found, while {} are expected".format(len(found_matrix_plots), min_matrix_plots))
        #     return False

        return True


def main():
    """
    Runs a few test validations, starts the web server and queries data
    """

    # fixme: See if we can re-enable this test or at least run it locally
    b2test_utils.skip_test("Not properly runnable yet")
    # noinspection PyUnreachableCode

    # only run the test on dev machines with splinter installed. Also for the
    # tests which don't use splinter, there are currently some connection
    # problems to the test webserver on the central build system
    try:
        import splinter  # noqa
    except ImportError:
        print(
            "TEST SKIPPED: The splinter package is required to run this test. "
            "Run 'pip3 install splinter' to install",
            file=sys.stderr,
        )
        sys.exit(1)

    success = True

    revs_to_gen = ["stack_test_1", "stack_test_2", "stack_test_3"]
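    # each of these (arbitrary) revision labels gets its own set of test
    # validation results via "validate_basf2 --test --tag <label>" below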

    # create a temporary test folder in order not to interfere with
    # already existing validation results
    with tempfile.TemporaryDirectory() as tmpdir:

        # switch to this folder
        os.chdir(str(tmpdir))

        for r in revs_to_gen:
            check_execute(f"validate_basf2 --test --tag {r}")

        # make sure the webserver process is terminated in any case
        try:
            # start webserver to serve json output files, plots and
            # interactive website
            server_process = subprocess.Popen(["run_validation_server"])
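            # run_validation_server is expected to serve the validation
            # results found in the current working directory (the temporary
            # folder we switched to above)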

            # wait a moment for the server to start
            time.sleep(2)
            # check the content of the webserver, will need splinter
            success = success and check_for_content(
                revs_to_gen + ["reference"], 7, 7
            )

            # check if triggering the plot creation works
            success = success and check_for_plotting(
                revs_to_gen[:-1], str(tmpdir)
            )
        except BaseException:
            # catch any exceptions so the finally block can terminate the
            # webserver process properly
            e = sys.exc_info()[0]
            # print exception again
            print(f"Error {e}")
            print(traceback.format_exc())
            success = False
        finally:
            # send terminate command
            server_process.terminate()
            # wait for the webserver to actually terminate
            server_process.wait()

    if not success:
        sys.exit(1)


if __name__ == "__main__":
    main()