Belle II Software  release-08-01-10
test_full_stack.py
#!/usr/bin/env python3

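"""
Full-stack test of the validation suite: run a few test validations,
start the validation web server, and check both the served website
content and the plot-creation REST endpoints.
"""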

# standard
import sys
import subprocess
import tempfile
import os
import time
import requests
import traceback

# ours
import validationserver
import validationpath
from validationtestutil import check_execute

# url of the local validation webserver
validation_url = "http://localhost:8000/"


def start_webserver():
    """
    Start the validation server process, this will not
    return. Therefore this function must be started within
    a new subprocess
    """
    validationserver.run_server()


def http_post(command, json_args):
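    """
    Post ``json_args`` as JSON to the given command endpoint of the local
    validation server and return the response, or None if the call failed.
    """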
    call_url = validation_url + command
    print(f"Posting {json_args} to {command}")
    r = requests.post(call_url, json=json_args)
    if not r.ok:
        print(
            "REST call {} with arguments {} failed".format(call_url, json_args)
        )
        print(str(r))
        return None

    return r


def check_for_plotting(revs, tmp_folder):
    """
    Checks if creating new plots for a revision combination works
    :param revs: List of revisions
    :type revs: List[str]
    :param tmp_folder: Temporary folder
    """

    print(f"Trying to recreate plots for revisions {revs}")

    res = http_post("create_comparison", {"revision_list": revs})
    if not res:
        return False

    # will be used to check on the progress
    prog_key = res.json()["progress_key"]

    done = False
    wait_time = 0.1  # in seconds
    max_wait_time = 3
    summed_wait_time = 0

    # check the plot status with the webserver and only exit after a timeout
    # or if the plot combination has been created
    while not done:
        res = http_post("check_comparison_status", {"input": prog_key})
        if not res:
            return False

        if res.json():
            if res.json()["status"] == "complete":
                # plots are done
                break

        time.sleep(wait_time)
        summed_wait_time += wait_time
        if summed_wait_time > max_wait_time:
            print(
                "Waited for {} seconds for the requested plots to complete "
                "and nothing happened".format(summed_wait_time)
            )
            return False

    # check if the plots are really present
    comp_folder = validationpath.get_html_plots_tag_comparison_folder(
        tmp_folder, revs
    )
    comp_json = validationpath.get_html_plots_tag_comparison_json(
        tmp_folder, revs
    )

    if not os.path.exists(comp_folder):
        print(f"Comparison folder {comp_folder} does not exist")
        return False
    if not os.path.isfile(comp_json):
        print(f"Comparison json {comp_json} does not exist")
        return False

    # just check for one random plot
    some_plot = os.path.join(
        comp_folder,
        "validation-test",
        "validationTestPlotsB_gaus_histogram.pdf",
    )
    if not os.path.isfile(some_plot):
        print(f"Comparison plot {some_plot} does not exist")
        return False

    print("Comparison properly created")
    return True


def check_for_content(revs, min_matrix_plots, min_plot_objects):
    """
    Checks for the expected content on the validation website
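    :param revs: list of revision labels that should appear on the page
    :param min_matrix_plots: minimum number of overview matrix plots
        (currently unused, the corresponding check is disabled below)
    :param min_plot_objects: minimum number of plot objects expected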
    """
    try:
        import splinter
    except ImportError:
        print(
            "The splinter package is required to run this test. Run 'pip3 "
            "install splinter' to install"
        )
        # don't give an error exit code here to not fail if splinter is not
        # available
        return True

    with splinter.Browser() as browser:
        # Visit URL
        url = validation_url + "static/validation.html"
        print(f"Opening {url} to perform checks")
        browser.visit(url)

        if len(browser.title) == 0:
            print("Validation website cannot be loaded")
            return False

        found_revs = browser.find_by_css(".revision-label")

        for r in revs:
            rr = [web_r for web_r in found_revs if web_r.value == r]
            if len(rr) == 0:
                print(
                    "Revision {} was not found on the validation website. "
                    "It should be there.".format(r)
                )
                return False

        plot_objects = browser.find_by_css(".object")

        print(
            f"Checking for a minimum number of {min_plot_objects} plot objects"
        )
        if len(plot_objects) < min_plot_objects:
            print(
                "Only {} plots found, while {} are expected".format(
                    len(plot_objects), min_plot_objects
                )
            )
            return False

        # click the overview check box
        checkbox_overview = browser.find_by_id("check_show_overview")
        # todo: does not work yet, checkbox is directly unchecked again
        checkbox_overview.check()
        # found_matrix_plots = browser.find_by_css(".plot_matrix_item")

        # if len(found_matrix_plots) < min_matrix_plots:
        #     print("Only {} matrix plots found, while {} are expected".format(
        #         len(found_matrix_plots), min_matrix_plots))
        #     return False

        return True


def main():
    """
    Runs two test validations, starts the web server and queries data
    """

    # fixme: See if we can reenable this test or at least run it locally
    print("TEST SKIPPED: Not properly runnable on build bot", file=sys.stderr)
    sys.exit(1)
    # noinspection PyUnreachableCode

    # only run the test on dev machines with splinter installed. Also for the
    # tests which don't use splinter, there are currently some connection
    # problems to the test webserver on the central build system
    try:
        import splinter  # noqa
    except ImportError:
        print(
            "TEST SKIPPED: The splinter package is required to run this test. "
            "Run 'pip3 install splinter' to install",
            file=sys.stderr,
        )
        sys.exit(1)

    success = True

    revs_to_gen = ["stack_test_1", "stack_test_2", "stack_test_3"]

    # create a temporary test folder in order not to interfere with
    # already existing validation results
    with tempfile.TemporaryDirectory() as tmpdir:

        # switch to this folder
        os.chdir(str(tmpdir))

        for r in revs_to_gen:
            check_execute(f"validate_basf2 --test --tag {r}")

        # make sure the webserver process is terminated in any case
        try:
            # start webserver to serve json output files, plots and
            # interactive website
            server_process = subprocess.Popen(["run_validation_server"])

            # wait a couple of seconds for the server to start
            time.sleep(2)
            # check the content of the webserver, will need splinter
            success = success and check_for_content(
                revs_to_gen + ["reference"], 7, 7
            )

            # check if triggering the plot creation works
            success = success and check_for_plotting(
                revs_to_gen[:-1], str(tmpdir)
            )
        except BaseException:
            # catch any exceptions so the finally block can terminate the
            # webserver process properly
            e = sys.exc_info()[0]
            # print exception again
            print(f"Error {e}")
            print(traceback.format_exc())
            success = False
        finally:
            # send terminate command
            server_process.terminate()
            # wait for the webserver to actually terminate
            server_process.wait()

    if not success:
        sys.exit(1)


if __name__ == "__main__":
    main()