ATLAS Offline Software
Checks.py
# Copyright (C) 2002-2024 CERN for the benefit of the ATLAS collaboration
import logging
from pathlib import Path
import subprocess

from .Helpers import warnings_count
from .Inputs import references_CVMFS_path
from .Test import TestSetup, WorkflowCheck, WorkflowTest, WorkflowType


class FailedOrPassedCheck(WorkflowCheck):
    """Was the q-test successful? To check, simply count the number of log lines containing the string "successful run"."""

    def run(self, test: WorkflowTest) -> bool:
        result = True
        printed_trf = False
        for step in test.steps:
            self.logger.info("-----------------------------------------------------")
            log = test.validation_path / f"log.{step}"
            counter = 0
            warnings = []
            errors = []
            if log.exists():
                with log.open() as file:
                    for line in file:
                        if ("ERROR" in line and "| ERROR |" not in line) or ("FATAL" in line and "| FATAL |" not in line):
                            errors.append(line[9:].strip())
                        elif "WARNING" in line and "| WARNING |" not in line:
                            warnings.append(line[9:].strip())
                        elif '"successful run"' in line:
                            counter += 1
                        elif step == "DQHistogramMerge" and "Writing file: myHIST.root" in line:  # DQ merge hack
                            counter += 1

            if warnings:
                self.logger.info(f"{step} validation test step WARNINGS")
                warnings = list(dict.fromkeys(warnings))
                for w in warnings:
                    self.logger.info(f" {w}")
                self.logger.info("-----------------------------------------------------")

            if errors:
                self.logger.info(f"{step} validation test step ERRORS")
                errors = list(dict.fromkeys(errors))
                for e in errors:
                    self.logger.info(f" {e}")
                self.logger.info("-----------------------------------------------------")

            if counter and not errors:
                self.logger.info(f"{step} validation test step successful")

                if step == "DQHistogramMerge":
                    self.logger.info(f"Full {step} step log:")
                    with log.open() as file:
                        for line in file:
                            self.logger.print(f" {line.strip()}")
                    self.logger.info("-----------------------------------------------------")
            else:
                result = False
                if log.exists():
                    printed_trf = True  # if one step fails, the following steps are expected to fail too, so don't be too verbose
                    self.logger.error(f"{step} validation test step failed")
                    self.logger.error(f"Full {step} step log:")
                    with log.open() as file:
                        for line in file:
                            self.logger.print(f" {line.strip()}")
                    self.logger.info("-----------------------------------------------------")
                else:
                    self.logger.error(f"{step} validation test step did not run")
                    if not printed_trf:
                        printed_trf = True
                        self.logger.error("Full transform log:")
                        with (test.validation_path / f"{test.ID}.log").open() as file:
                            for line in file:
                                self.logger.print(f" {line.strip()}")
                        self.logger.info("-----------------------------------------------------")

            if self.setup.validation_only:
                continue  # skip the reference check: in this mode the clean (reference) tests have not been run

            log = test.reference_path / f"log.{step}"
            counter = 0
            with log.open() as file:
                for line in file:
                    if '"successful run"' in line:
                        counter += 1
                    elif (step == "DQHistogramMerge"  # DQ merge hack
                          and "Writing file: myHIST.root" in line):
                        counter += 1

            if counter:
                self.logger.info(f"{step} reference test step successful")
            else:
                self.logger.error(f"{step} reference test step failed")
                result = False

        if result:
            self.logger.info(f"All {test.ID} Athena steps completed successfully\n")
        else:
            self.logger.error(f"One or more {test.ID} Athena steps failed. Please investigate the cause.\n")

        return result
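
# A minimal standalone sketch (sample messages invented) of the de-duplication
# idiom used above: dict.fromkeys removes duplicates while preserving
# first-seen order, which a plain set() would not guarantee.
_example_messages = ["overlap detected", "missing link", "overlap detected"]
assert list(dict.fromkeys(_example_messages)) == ["overlap detected", "missing link"]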


class FrozenTier0PolicyCheck(WorkflowCheck):
    """Run the Frozen Tier0 Policy check."""

    def __init__(self, setup: TestSetup, input_format: str, max_events: int) -> None:
        super().__init__(setup)
        self.format = input_format
        self.max_events = str(max_events)
        self.detailed_comparison = setup.detailed_comparison

    def run(self, test: WorkflowTest) -> bool:
        self.logger.info("---------------------------------------------------------------------------------------")
        self.logger.info(f"Running {test.ID} Frozen Tier0 Policy Check on {self.format} for {self.max_events} events")

        diff_rules_path: Path = self.setup.diff_rules_path
        diff_rules_exclusion_filename: str = f"{test.ID}_{self.format}_diff-exclusion-list.txt"
        diff_rules_interest_filename: str = f"{test.ID}_{self.format}_diff-interest-list.txt"
        diff_rules_file = None

        file_name = f"my{self.format}.pool.root"
        if test.type == WorkflowType.Derivation:
            file_name = f"{self.format}.myOutput.pool.root"
        reference_file = self.reference_file(test, file_name)
        if reference_file is None:
            self.logger.error(f"Reference file {file_name} not found")
            return False

        if self.setup.validation_only:
            cvmfs_path = Path(references_CVMFS_path)
            diff_rules_path = cvmfs_path / self.setup.release_ID / test.ID

        self.logger.info(f"Reading the reference file from location {reference_file}")

        # try to get the exclusion list or the list of branches of interest
        branches_of_interest = False
        if self.setup.diff_rules_path is None:
            diff_rules_exclusion_local_path = test.validation_path / diff_rules_exclusion_filename
            diff_rules_interest_local_path = test.validation_path / diff_rules_interest_filename
            subprocess.Popen(["/bin/bash", "-c", f"cd {test.validation_path}; get_files -remove -data {diff_rules_exclusion_filename}"], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
            subprocess.Popen(["/bin/bash", "-c", f"cd {test.validation_path}; get_files -remove -data {diff_rules_interest_filename}"], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
            if not diff_rules_exclusion_local_path.exists() and not diff_rules_interest_local_path.exists():
                self.logger.info(f"Neither '{diff_rules_exclusion_local_path}' nor '{diff_rules_interest_local_path}' exists in the release.")
            elif diff_rules_exclusion_local_path.exists():
                diff_rules_file = diff_rules_exclusion_local_path
            elif diff_rules_interest_local_path.exists():
                diff_rules_file = diff_rules_interest_local_path
                branches_of_interest = True

        if diff_rules_file is None and diff_rules_path is not None:
            diff_rules_file = diff_rules_path / diff_rules_exclusion_filename
            if not diff_rules_file.exists():
                diff_rules_file = diff_rules_path / diff_rules_interest_filename
                if diff_rules_file.exists():
                    branches_of_interest = True

        if diff_rules_file is not None and diff_rules_file.exists():
            self.logger.info(f"Reading the diff rules file from location {diff_rules_file}")
            diff_root_list = []
            with diff_rules_file.open() as f:
                for line in f:
                    stripped_line = line.rstrip()
                    if stripped_line and stripped_line[0] != '#':
                        diff_root_list.append(r"'{}'".format(stripped_line))
        else:
            self.logger.info("No diff rules file exists, using the default list")
            diff_root_list = [r"'index_ref'", r"'(.*)_timings(.*)'", r"'(.*)_mems(.*)'"]

        validation_file = test.validation_path / file_name
        log_file = test.validation_path / f"diff-root-{test.ID}.{self.format}.log"
        diff_root_list = " ".join(diff_root_list)
        diff_root_mode = "--branches-of-interest" if branches_of_interest else "--ignore-leaves"

        comparison_mode = "detailed" if self.detailed_comparison else "semi-detailed"
        comparison_command = f"acmd.py diff-root {reference_file} {validation_file} --order-trees --nan-equal --exact-branches --mode {comparison_mode} --error-mode resilient {diff_root_mode} {diff_root_list} --entries {self.max_events} > {log_file} 2>&1"
        output, error = subprocess.Popen(["/bin/bash", "-c", comparison_command], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
        output, error = output.decode("utf-8"), error.decode("utf-8")

        # We want to catch/print both container additions/subtractions as well as
        # changes within these containers. `all_good` is meant to catch any other
        # issue found in the diff (not expected, but just to be safe).
        passed_frozen_tier0_test = True
        all_good = False
        with log_file.open() as file:
            for line in file:
                if "WARNING" in line:  # catches container additions/subtractions
                    self.logger.error(line.strip())
                    passed_frozen_tier0_test = False
                if "leaves differ" in line:  # catches changes in branches
                    self.logger.error(line.strip())
                    passed_frozen_tier0_test = False
                if "ERROR" in line:  # catches other issues (including unmatched branches)
                    self.logger.error(line.strip())
                    passed_frozen_tier0_test = False
                if "INFO all good." in line:
                    all_good = True

        result = passed_frozen_tier0_test and all_good
        if result:
            self.logger.info("Passed!\n")
        else:
            # print the CI helper directly to avoid logger decorations
            if self.setup.disable_release_setup:
                self.logger.print(f"ATLAS-CI-ADD-LABEL: {test.run.value}-{test.type.value}-output-changed")
                self.logger.print("")

            if "DAOD" in self.format:
                self.logger.error(f"Your change breaks the frozen derivation policy in test {test.ID}.")
                self.logger.error("Please make sure you explain the reason for the change and ask relevant experts for approval.")
            else:
                self.logger.error(f"Your change breaks the frozen tier0 policy in test {test.ID}.")
                self.logger.error("Please make sure this has been discussed in the correct meeting (RIG or Simulation) and approved by the relevant experts.")

            # copy the artifacts
            if self.setup.disable_release_setup:
                comparison_command = f"CopyCIArtifact.sh {validation_file}"
                output, error = subprocess.Popen(["/bin/bash", "-c", comparison_command], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
                output, error = output.decode("utf-8"), error.decode("utf-8")

                if error or not output:
                    self.logger.error(f"Tried copying '{validation_file}' to the CI artifacts area but it failed.")
                    self.logger.error(f" {error.strip()}")
                else:
                    self.logger.error(output)

            with log_file.open() as file:
                for line in file:
                    self.logger.info(f" {line.strip()}")
            self.logger.info("-----------------------------------------------------\n")

        return result
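
# A hedged sketch (file names and event count invented) of the comparison
# command assembled in FrozenTier0PolicyCheck.run with the default exclusion
# list; the check shells this out via /bin/bash and then scans the log it wrote.
_example_diff_root_command = (
    "acmd.py diff-root ref.AOD.pool.root val.AOD.pool.root --order-trees "
    "--nan-equal --exact-branches --mode semi-detailed --error-mode resilient "
    "--ignore-leaves 'index_ref' '(.*)_timings(.*)' '(.*)_mems(.*)' --entries 100"
)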


class MetadataCheck(WorkflowCheck):
    """Run the metadata check."""

    def __init__(self, setup: TestSetup, input_format: str) -> None:
        super().__init__(setup)
        self.format = input_format

    def run(self, test: WorkflowTest) -> bool:
        self.logger.info("---------------------------------------------------------------------------------------")
        self.logger.info(f"Running {test.ID} metadata check on {self.format}")

        file_name = f"my{self.format}.pool.root"
        if test.type == WorkflowType.Derivation:
            file_name = f"{self.format}.myOutput.pool.root"

        reference_file = self.reference_file(test, file_name)
        if reference_file is None:
            self.logger.error(f"Reference file {file_name} not found")
            return False

        self.logger.info(f"Reading the reference file from location {reference_file}")

        exclusion_list = " ".join(["file_guid", "file_size", "/TagInfo/AtlasRelease", "FileMetaData/productionRelease", "StreamDAOD_PHYS/eventTypes", "StreamDAOD_PHYSLITE/eventTypes"])

        validation_file = test.validation_path / file_name
        log_file = test.validation_path / f"meta-diff-{test.ID}.{self.format}.log"

        comparison_command = f"meta-diff --ordered -m full -x diff {reference_file} {validation_file} --drop {exclusion_list} --ignoreTrigger > {log_file} 2>&1"
        output, error = subprocess.Popen(["/bin/bash", "-c", comparison_command], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
        output, error = output.decode("utf-8"), error.decode("utf-8")

        # the check passes only if the meta-diff log is empty
        result = True
        with log_file.open() as file:
            for line in file:
                if line.strip():
                    result = False

        if result:
            self.logger.info("Passed!\n")
        else:
            # print the CI helper directly to avoid logger decorations
            if self.setup.disable_release_setup:
                self.logger.print(f"ATLAS-CI-ADD-LABEL: {test.run.value}-{test.type.value}-output-changed")
                self.logger.print("")

            self.logger.error(f"Your change breaks the frozen tier0 policy in test {test.ID}.")
            self.logger.error("Please make sure this has been discussed in the correct meeting (RIG or Simulation) and approved by the relevant experts.")

            # copy the artifacts
            if self.setup.disable_release_setup:
                comparison_command = f"CopyCIArtifact.sh {validation_file}"
                output, error = subprocess.Popen(["/bin/bash", "-c", comparison_command], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
                output, error = output.decode("utf-8"), error.decode("utf-8")

                if error or not output:
                    self.logger.error(f"Tried copying '{validation_file}' to the CI artifacts area but it failed.")
                    self.logger.error(f" {error.strip()}")
                else:
                    self.logger.error(output)

            with log_file.open() as file:
                for line in file:
                    self.logger.info(f" {line.strip()}")
            self.logger.info("-----------------------------------------------------\n")

        return result
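
# A hedged sketch (file names invented, drop list abridged) of the meta-diff
# call built in MetadataCheck.run; the check passes only when the log file it
# writes comes out empty.
_example_meta_diff_command = (
    "meta-diff --ordered -m full -x diff ref.AOD.pool.root val.AOD.pool.root "
    "--drop file_guid file_size --ignoreTrigger"
)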


class AODContentCheck(WorkflowCheck):
    """Run the AOD content check."""

    def run(self, test: WorkflowTest) -> bool:
        self.logger.info("---------------------------------------------------------------------------------------")
        self.logger.info(f"Running {test.ID} AOD content check")

        file_name = "myAOD.pool.root"
        output_name = f"{test.ID}_AOD_content.txt"

        validation_file = test.validation_path / file_name
        validation_output = test.validation_path / output_name
        validation_command = f"acmd.py chk-file {validation_file} | awk '/---/{{flag=1;next}}/===/{{flag=0}}flag' | awk '{{print $10}}' | LC_ALL=C sort | uniq > {validation_output}"

        output_val, error_val = subprocess.Popen(["/bin/bash", "-c", validation_command], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
        output_val, error_val = output_val.decode("utf-8"), error_val.decode("utf-8")
        if error_val:
            self.logger.error(f"Something went wrong with retrieving the content for test {test.ID}:")
            self.logger.error(error_val)

        # Read references
        if self.setup.validation_only:
            # try to get the reference
            reference_path = test.validation_path
            reference_output_name = f"{test.ID}_AOD_content.ref"
            reference_output = reference_path / reference_output_name
            subprocess.Popen(["/bin/bash", "-c", f"cd {reference_path}; get_files -remove -data {reference_output_name}"], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
            if not reference_output.exists():
                self.logger.info(f"No reference file '{reference_output_name}' to compare the content with.")
                return True
        else:
            reference_path = test.reference_path
            reference_output = reference_path / output_name
            reference_file = reference_path / file_name

            reference_command = f"acmd.py chk-file {reference_file} | awk '/---/{{flag=1;next}}/===/{{flag=0}}flag' | awk '{{print $10}}' | LC_ALL=C sort | uniq > {reference_output}"
            subprocess.Popen(["/bin/bash", "-c", reference_command], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()

        # Remove HLT containers in some cases
        extra_diff_args = ""
        if test.type == WorkflowType.MCReco or test.type == WorkflowType.MCPileUpReco:
            extra_diff_args = "-I '^HLT' -I '^LVL1' -I '^L1'"

        # Compute the diff
        diff_output, diff_error = subprocess.Popen(["/bin/bash", "-c", f"diff {extra_diff_args} {reference_output} {validation_output}"], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
        diff_output, diff_error = diff_output.decode("utf-8"), diff_error.decode("utf-8")

        result = False
        if not diff_output and not diff_error:
            self.logger.info("Passed!\n")
            result = True
        else:
            # print the CI helper directly to avoid logger decorations
            if self.setup.disable_release_setup:
                self.logger.print(f"ATLAS-CI-ADD-LABEL: {test.run.value}-{test.type.value}-output-changed")
                self.logger.print("")

            self.logger.error(f"Your change modifies the output in test {test.ID}.")
            self.logger.error("Please make sure this has been discussed in the correct meeting (RIG or Simulation) and approved by the relevant experts.")
            if self.setup.validation_only:
                self.logger.error(f"The output '{output_name}' (>) differs from the reference '{reference_output_name}' (<):")
            else:
                self.logger.error(f"The output '{validation_output}' (>) differs from the reference '{reference_output}' (<):")
            if diff_output:
                self.logger.print("")
                self.logger.print(diff_output)
            if diff_error:
                self.logger.print(diff_error)
            self.logger.info("-----------------------------------------------------\n")

        return result
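
# A pure-Python sketch of the awk pipeline used above (sample chk-file rows
# invented): keep the lines between the '---' and '===' markers of the
# `acmd.py chk-file` output and collect the 10th field, the container name.
_rows = ["=== prologue", "--- begin", "a b c d e f g h i MyContainer", "=== end"]
_flag, _names = False, set()
for _row in _rows:
    if _row.startswith("---"):
        _flag = True
        continue
    if _row.startswith("==="):
        _flag = False
    if _flag:
        _names.add(_row.split()[9])
assert sorted(_names) == ["MyContainer"]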


class AODDigestCheck(WorkflowCheck):
    """Run the AOD digest check."""

    def __init__(self, setup: TestSetup, max_events: int = -1) -> None:
        super().__init__(setup)
        self.max_events = str(max_events)

    def run(self, test: WorkflowTest) -> bool:
        self.logger.info("---------------------------------------------------------------------------------------")
        self.logger.info(f"Running {test.ID} AOD digest")

        file_name = "myAOD.pool.root"
        output_name = f"{test.ID}_AOD_digest.txt"

        validation_file = test.validation_path / file_name
        validation_output = test.validation_path / output_name
        validation_log_file = test.validation_path / f"AODdigest-{test.ID}.log"
        validation_command = f"xAODDigest.py {validation_file} {validation_output} > {validation_log_file} 2>&1"

        output_val, error_val = subprocess.Popen(["/bin/bash", "-c", validation_command], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
        output_val, error_val = output_val.decode("utf-8"), error_val.decode("utf-8")
        if error_val:
            self.logger.error(f"Something went wrong with the digest calculation for test {test.ID}:")
            self.logger.error(error_val)

        # Read references
        if self.setup.validation_only:
            # try to get the reference
            reference_path = test.validation_path
            reference_output_name = f"{test.ID}_AOD_digest.ref"
            reference_output = reference_path / reference_output_name
            subprocess.Popen(["/bin/bash", "-c", f"cd {reference_path}; get_files -remove -data {reference_output_name}"], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
            if not reference_output.exists():
                self.logger.info(f"No reference file '{reference_output_name}' to compare the digest with. Printing the full digest:")
                with validation_output.open() as f:
                    for line in f:
                        self.logger.print(f" {line.strip()}")
                return True
        else:
            reference_path = test.reference_path
            reference_output = reference_path / output_name
            reference_file = reference_path / file_name
            reference_log_file = test.reference_path / f"AODdigest-{test.ID}.log"

            reference_command = f"xAODDigest.py {reference_file} {reference_output} > {reference_log_file} 2>&1"
            subprocess.Popen(["/bin/bash", "-c", reference_command], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()

        # Compute the diff
        diff_output, diff_error = subprocess.Popen(["/bin/bash", "-c", f"diff {reference_output} {validation_output}"], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
        diff_output, diff_error = diff_output.decode("utf-8"), diff_error.decode("utf-8")

        result = False
        if not diff_output and not diff_error:
            self.logger.info("Passed!\n")
            result = True
        else:
            # print the CI helper directly to avoid logger decorations
            if self.setup.disable_release_setup:
                self.logger.print(f"ATLAS-CI-ADD-LABEL: {test.run.value}-{test.type.value}-output-changed")
                self.logger.print("")

            self.logger.error(f"Your change breaks the digest in test {test.ID}.")
            self.logger.error("Please make sure this has been discussed in the correct meeting (RIG or Simulation) and approved by the relevant experts.")
            if self.setup.validation_only:
                self.logger.error(f"The output '{output_name}' (>) differs from the reference '{reference_output_name}' (<):")
            else:
                self.logger.error(f"The output '{validation_output}' (>) differs from the reference '{reference_output}' (<):")
            if diff_output:
                with reference_output.open() as file:
                    self.logger.print(file.readline())  # print the digest header line for context
                self.logger.print(diff_output)
            if diff_error:
                self.logger.print(diff_error)
            self.logger.info("-----------------------------------------------------\n")

        return result
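
# A hedged usage sketch (setup and test objects assumed to exist elsewhere):
# the digest check runs `xAODDigest.py <AOD file> <digest file>` for both the
# reference and validation outputs and then diffs the two text digests, e.g.
#
#   check = AODDigestCheck(setup, max_events=100)
#   passed = check.run(test)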


class SimpleCheck(WorkflowCheck):
    """Run a simple check comparing a single scalar quantity between the reference and validation logs."""

    def __init__(self, setup: TestSetup, name: str, quantity: str, unit: str, field: int, threshold: float):
        super().__init__(setup)
        self.name = name
        self.quantity = quantity
        self.unit = unit
        self.field = field
        self.threshold = threshold

    def run(self, test: WorkflowTest) -> bool:
        self.logger.info("-----------------------------------------------------")
        self.logger.info(f"Running {test.ID} {self.name} Check")

        result = True
        for step in test.steps:
            log_name = f"log.{step}"
            reference_log = test.reference_path / log_name
            validation_log = test.validation_path / log_name

            reference_value = 0
            with reference_log.open() as file:
                found = False
                for line in file:
                    if self.quantity in line:
                        reference_value = float(line.split()[self.field])
                        found = True
                        break
                if not found:
                    self.logger.error(f"No data available in {reference_log}. Job failed.")
                    return False

            validation_value = 0
            with validation_log.open() as file:
                found = False
                for line in file:
                    if self.quantity in line:
                        validation_value = float(line.split()[self.field])
                        found = True
                        break
                if not found:
                    self.logger.error(f"No data available in {validation_log}. Job failed.")
                    return False

            if reference_value != 0:
                factor = validation_value / reference_value

                # Error if the factor increases (very bad things)
                # Warning if the factor decreases (should be an understood feature)
                if factor > 1. + self.threshold:
                    self.logger.error(f"{self.quantity} in the {step} step with(out) your change is {validation_value} ({reference_value}) {self.unit}")
                    self.logger.error(f"Your change modifies {self.quantity} by a factor of {factor}")
                    self.logger.error("Is this an expected outcome of your change(s)?")
                    result = False
                    self.logger.error(f"{step}: {self.name}")
                    self.logger.error(f"ref {reference_value} {self.unit}")
                    self.logger.error(f"val {validation_value} {self.unit}")
                if factor < 1. - self.threshold:
                    self.logger.warning(f"{self.quantity} in the {step} step with(out) your change is {validation_value} ({reference_value}) {self.unit}")
                    self.logger.warning(f"Your change modifies {self.quantity} by a factor of {factor}")
                    self.logger.warning("Is this an expected outcome of your change(s)?")
                    result = True
                    self.logger.warning(f"{step}: {self.name}")
                    self.logger.warning(f"ref {reference_value} {self.unit}")
                    self.logger.warning(f"val {validation_value} {self.unit}")

        if result:
            self.logger.info("Passed!\n")
        else:
            self.logger.error("Failed!\n")

        return result
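
# A worked sketch of the threshold logic above (numbers invented): with a 10%
# threshold, a validation/reference ratio above 1.1 is reported as an error,
# while a ratio below 0.9 is only reported as a warning.
_threshold = 0.1
assert 1.25 > 1. + _threshold   # increase beyond the threshold -> error
assert 0.80 < 1. - _threshold   # decrease beyond the threshold -> warning only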


class WarningsComparisonCheck(WorkflowCheck):
    """Run the WARNINGS count comparison check."""

    def run(self, test: WorkflowTest):
        self.logger.info("-----------------------------------------------------")
        self.logger.info(f"Running {test.ID} WARNINGS Check\n")

        result = True
        for step in test.steps:
            log_name = f"log.{step}"
            reference_log = test.reference_path / log_name
            validation_log = test.validation_path / log_name
            warnings_reference = warnings_count(reference_log)
            warnings_validation = warnings_count(validation_log)

            # strip the message prefix before comparing
            wr = [w[9:] for w in warnings_reference]
            wv = [w[9:] for w in warnings_validation]

            wn = list(set(wv) - set(wr))  # new warnings, present only in the validation log
            wo = list(set(wr) - set(wv))  # old warnings, present only in the reference log

            if len(warnings_validation) > len(warnings_reference):
                self.logger.error(f"Validation log file {validation_log} has {len(warnings_validation) - len(warnings_reference)} more warning(s) than the reference log file {reference_log}")
                self.logger.error("Please remove the new warning message(s):")
                for w in wn:
                    self.logger.error(w)
                result = False
            elif len(warnings_validation) < len(warnings_reference):
                self.logger.info(f"Validation log file {validation_log} has {len(warnings_reference) - len(warnings_validation)} fewer warning(s) than the reference log file {reference_log}")
                self.logger.info("The reduction of unnecessary WARNINGs is much appreciated. Is it expected?")
                self.logger.info("The following warning messages have been removed:")
                for w in wo:
                    self.logger.info(w)
                result = True
            else:
                self.logger.info(f"Validation log file {validation_log} has the same number of warnings as the reference log file {reference_log}")
                result = True

        if result:
            self.logger.info("Passed!\n")
        else:
            self.logger.error("Failed!\n")

        return result
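
# A minimal sketch (messages invented) of the set arithmetic used above:
# warnings only in the validation log are new, while those only in the
# reference log have been removed.
_ref_warnings = {"slow DB access", "unchecked StatusCode"}
_val_warnings = {"unchecked StatusCode", "missing calibration"}
assert _val_warnings - _ref_warnings == {"missing calibration"}   # new
assert _ref_warnings - _val_warnings == {"slow DB access"}        # removed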


class FPECheck(WorkflowCheck):
    """Run the FPE (floating-point exception) check."""

    # Ignore FPEs for these test runs, types, and IDs:
    ignoreTestRuns = []
    ignoreTestTypes = [WorkflowType.FullSim, WorkflowType.AF3]
    ignoreTestIDs = ['x686']

    def run(self, test: WorkflowTest):
        self.logger.info("-----------------------------------------------------")
        self.logger.info(f"Running {test.ID} FPE Check")

        result = True
        for step in test.steps:
            log = test.validation_path / f"log.{step}"
            fpes = {}
            stack_traces = {}
            with log.open() as file:
                last_stack_trace = None
                for line in file:
                    if "WARNING FPE" in line:
                        last_stack_trace = None
                        fpe = None
                        # the offending algorithm is the last bracketed token on the line
                        for part in reversed(line.split()):
                            if "[" in part:
                                fpe = part.strip().replace("[", "").replace("]", "")
                                break
                        if fpe:
                            if fpe in fpes:
                                fpes[fpe] += 1
                            else:
                                fpes[fpe] = 1
                                last_stack_trace = []
                                stack_traces[fpe] = last_stack_trace
                    elif "FPE stacktrace" in line and last_stack_trace is not None:
                        line = next(file)
                        last_stack_trace.append(line.strip()[9:])

            if fpes:
                msgLvl = logging.WARNING if test.run in self.ignoreTestRuns or test.type in self.ignoreTestTypes or test.ID in self.ignoreTestIDs else logging.ERROR
                result = False
                self.logger.log(msgLvl, f" {step} validation test step FPEs")
                for fpe, count in sorted(fpes.items(), key=lambda item: item[1]):
                    self.logger.log(msgLvl, f"{count:>5} {fpe}")
                for fpe in fpes.keys():
                    self.logger.log(msgLvl, "-----------------------------------------------------")
                    self.logger.log(msgLvl, f" first stack trace for algorithm {fpe}:")
                    for line in stack_traces[fpe]:
                        self.logger.log(msgLvl, line)
                    self.logger.log(msgLvl, "-----------------------------------------------------")

        if result:
            self.logger.info("Passed!\n")
        elif test.run in self.ignoreTestRuns or test.type in self.ignoreTestTypes or test.ID in self.ignoreTestIDs:
            self.logger.warning("Failed!")
            self.logger.warning("Check disabled due to irreproducibilities!\n")
            result = True
        else:
            self.logger.error("Failed!\n")

        return result
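
# A minimal sketch (sample log line invented) of the FPE parsing above: the
# offending algorithm is taken from the last bracketed token on the line.
_sample = "FPEAuditor           WARNING FPE INVALID in [Execute] of [MyAlgorithm]"
_alg = next(_p.replace("[", "").replace("]", "") for _p in reversed(_sample.split()) if "[" in _p)
assert _alg == "MyAlgorithm"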