Belle II Software release-05-02-19
test_full_stack.py
1 #!/usr/bin/env python3
2 # -*- coding: utf-8 -*-
3 
4 # standard
5 import sys
6 import subprocess
7 import tempfile
8 import os
9 import time
10 import requests
11 import traceback
12 
13 # ours
14 import validationserver
15 import validationpath
16 from validationtestutil import check_execute
17 
18 # url of the local validation webserver
19 validation_url = "http://localhost:8000/"
20 
21 
22 def start_webserver():
23  """
24  Start the validation server process; this will not
25  return. Therefore this function must be started within
26  a new subprocess
27  """
28  validationserver.run_server()
29 
30 
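# Editor's sketch (not part of the original test): start_webserver() blocks,
# so it has to run in its own process. One way to do that with the standard
# library, assuming the default host/port that validation_url points at:
def _example_start_webserver_in_new_process():
    import multiprocessing
    server = multiprocessing.Process(target=start_webserver)
    server.start()
    # ... interact with the server via validation_url ...
    server.terminate()
    server.join()
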
31 def http_post(command, json_args):
32  call_url = validation_url + command
33  print("Posting {} to {}".format(json_args, command))
34  r = requests.post(call_url,
35  json=json_args)
36  if not r.ok:
37  print("REST call {} with arguments {} failed".format(
38  call_url, json_args))
39  print(str(r))
40  return None
41 
42  return r
43 
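# Editor's sketch (not part of the original test): typical use of http_post()
# against the create_comparison endpoint exercised below. The revision names
# are placeholders; the server replies with a JSON body that carries a
# "progress_key" used to poll for completion.
def _example_http_post_usage():
    res = http_post("create_comparison",
                    {"revision_list": ["stack_test_1", "stack_test_2"]})
    if res is None:
        return None
    return res.json()["progress_key"]
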
44 
45 def check_for_plotting(revs, tmp_folder):
46  """
47  Checks if creating new plots for a revision combination works
48  :param revs: List of revisions
49  :type revs: List[str]
50  :param tmp_folder: Temporary folder
51  """
52 
53  print("Trying to recreate plots for revisions {}".format(revs))
54 
55  res = http_post("create_comparison", {"revision_list": revs})
56  if not res:
57  return False
58 
59  # will be used to check on the progress
60  prog_key = res.json()["progress_key"]
61 
62  done = False
63  wait_time = 0.1 # in seconds
64  max_wait_time = 3
65  summed_wait_time = 0
66 
67  # check the plot status with the webserver and only exit after a timeout
68  # or if the plot combination has been created
69  while not done:
70  res = http_post("check_comparison_status", {"input": prog_key})
71  if not res:
72  return False
73 
74  if res.json():
75  if res.json()["status"] == "complete":
76  # plots are done
77  break
78 
79  time.sleep(wait_time)
80  summed_wait_time += wait_time
81  if summed_wait_time > max_wait_time:
82  print("Waited for {} seconds for the requested plots to complete "
83  "and nothing happened".format(summed_wait_time))
84  return False
85 
86  # check if the plots are really present
87  comp_folder = \
88  validationpath.get_html_plots_tag_comparison_folder(tmp_folder, revs)
89  comp_json = \
90  validationpath.get_html_plots_tag_comparison_json(tmp_folder, revs)
91 
92  if not os.path.exists(comp_folder):
93  print("Comparison folder {} does not exist".format(comp_folder))
94  return False
95  if not os.path.isfile(comp_json):
96  print("Comparison json {} does not exist".format(comp_json))
97  return False
98 
99  # just check for one random plot
100  some_plot = os.path.join(
101  comp_folder,
102  "validation-test",
103  "validationTestPlotsB_gaus_histogram.pdf"
104  )
105  if not os.path.isfile(some_plot):
106  print("Comparison plot {} does not exist".format(some_plot))
107  return False
108 
109  print("Comparison properly created")
110  return True
111 
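# Editor's sketch (not part of the original test): the wait loop above is an
# instance of a generic "poll until done or timed out" pattern. A reusable
# helper version of it (names are illustrative only) could look like this:
def _example_poll_until(condition, wait_time=0.1, max_wait_time=3.0):
    """Call condition() until it returns True or max_wait_time is exceeded.

    Returns True if the condition became true, False on timeout.
    """
    waited = 0.0
    while waited <= max_wait_time:
        if condition():
            return True
        time.sleep(wait_time)
        waited += wait_time
    return False
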
112 
113 def check_for_content(revs, min_matrix_plots, min_plot_objects):
114  """
115  Checks for the expected content on the validation website
116  """
117  try:
118  import splinter
119  except ImportError:
120  print("The splinter package is required to run this test. Run 'pip3 "
121  "install splinter' to install")
122  # don't give an error exit code here to not fail if splinter is not
123  # available
124  return True
125 
126  with splinter.Browser() as browser:
127  # Visit URL
128  url = validation_url + "static/validation.html"
129  print("Opening {} to perform checks".format(url))
130  browser.visit(url)
131 
132  if len(browser.title) == 0:
133  print("Validation website cannot be loaded")
134  return False
135 
136  found_revs = browser.find_by_css(".revision-label")
137 
138  for r in revs:
139  rr = [web_r for web_r in found_revs if web_r.value == r]
140  if len(rr) == 0:
141  print("Revision {} was not found on validation website. It "
142  "should be there.".format(r))
143  return False
144 
145  plot_objects = browser.find_by_css(".object")
146 
147  print("Checking for a minimum number of {} plot objects".format(
148  min_plot_objects))
149  if len(plot_objects) < min_plot_objects:
150  print("Only {} plots found, while {} are expected".format(
151  len(plot_objects), min_plot_objects))
152  return False
153 
154  # click the overview check box
155  checkbox_overview = browser.find_by_id("check_show_overview")
156  # todo: does not work yet, checkbox is directly unchecked again
157  checkbox_overview.check()
158  # found_matrix_plots = browser.find_by_css(".plot_matrix_item")
159 
160  # if len(found_matrix_plots) < min_matrix_plots:
161  # print ("Only {} matrix plots found, while {} are expected".format(len(found_matrix_plots), min_matrix_plots))
162  # return False
163 
164  return True
165 
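# Editor's sketch (not part of the original test): the splinter checks above
# boil down to "open a page and count the elements matching a CSS selector".
# A stripped-down helper doing just that (URL and selector are placeholders):
def _example_count_css_elements(url, css_selector):
    import splinter
    with splinter.Browser() as browser:
        browser.visit(url)
        return len(browser.find_by_css(css_selector))
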
166 
167 def main():
168  """
169  Runs three test validations, starts the web server and queries data
170  """
171 
172  # fixme: See if we can reenable this test or at least run it locally
173  print("TEST SKIPPED: Not properly runnable on build bot", file=sys.stderr)
174  sys.exit(1)
175  # noinspection PyUnreachableCode
176 
177  # only run the test on dev machines with splinter installed. Also for the
178  # tests which don't use splinter, there are currently some connection
179  # problems to the test webserver on the central build system
180  try:
181  import splinter
182  except ImportError:
183  print("TEST SKIPPED: The splinter package is required to run this test. " +
184  "Run 'pip3 install splinter' to install", file=sys.stderr)
185  sys.exit(1)
186 
187  success = True
188 
189  revs_to_gen = ["stack_test_1", "stack_test_2", "stack_test_3"]
190 
191  # create a temporary test folder in order not to interfere with
192  # already existing validation results
193  with tempfile.TemporaryDirectory() as tmpdir:
194 
195  # switch to this folder
196  os.chdir(str(tmpdir))
197 
198  for r in revs_to_gen:
199  check_execute("validate_basf2 --test --tag {}".format(r))
200 
201  # make sure the webserver process is terminated in any case
202  try:
203  # start webserver to serve json output files, plots and
204  # interactive website
205  server_process = subprocess.Popen(["run_validation_server"])
206 
207  # give the server a couple of seconds to start
208  time.sleep(2)
209  # check the content of the webserver, will need splinter
210  success = success and \
211  check_for_content(revs_to_gen + ["reference"], 7, 7)
212 
213  # check if triggering the plot creation works
214  success = success and check_for_plotting(revs_to_gen[:-1], str(tmpdir))
215  except BaseException:
216  # catch any exceptions so the finally block can terminate the
217  # webserver process properly
218  e = sys.exc_info()[0]
219  # print exception again
220  print("Error {}".format(e))
221  print(traceback.format_exc())
222  success = False
223  finally:
224  # send terminate command
225  server_process.terminate()
226  # wait for the webserver to actually terminate
227  server_process.wait()
228 
229  if not success:
230  sys.exit(1)
231 
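# Editor's sketch (not part of the original test): a slightly more defensive
# variant of the teardown in main(), escalating to kill() if the server does
# not react to terminate() within a few seconds.
def _example_stop_server(server_process, timeout=5):
    server_process.terminate()
    try:
        server_process.wait(timeout=timeout)
    except subprocess.TimeoutExpired:
        server_process.kill()
        server_process.wait()
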
232 
233 if __name__ == "__main__":
234  main()