ATLAS Offline Software
Loading...
Searching...
No Matches
Checks.py
Go to the documentation of this file.
1# Copyright (C) 2002-2024 CERN for the benefit of the ATLAS collaboration
2import logging
3from pathlib import Path
4import subprocess
5
6from .Helpers import warnings_count
7from .Inputs import references_CVMFS_path
8from .Test import TestSetup, WorkflowCheck, WorkflowTest, WorkflowType
9
10
12 """Was the q test successful? To check simply count the number of lines containing the string "successful run"."""
13
def run(self, test: WorkflowTest) -> bool:
    """Check whether every transform step of *test* completed successfully.

    A step is counted as successful when its log contains the literal string
    '"successful run"' (quotes included, as printed by the transform); for the
    DQHistogramMerge step the line "Writing file: myHIST.root" is accepted
    instead.  ERROR/FATAL lines fail the step; WARNING lines are only
    reported.  Unless running in validation-only mode, the reference run's
    logs are checked the same way.  Returns True only if all steps of both
    runs succeeded.
    """
    result = True
    printed_trf = False
    for step in test.steps:
        self.logger.info("-----------------------------------------------------")
        log = test.validation_path / f"log.{step}"
        counter = 0
        warnings = []
        errors = []
        if log.exists():
            with log.open() as file:
                for line in file:
                    # Plain ERROR/FATAL/WARNING markers are collected; table-style
                    # "| ERROR |" rows (summary tables) are deliberately skipped.
                    if ("ERROR" in line and "| ERROR |" not in line) or ("FATAL" in line and "| FATAL |" not in line):
                        errors.append(line[9:].strip())  # drop the fixed-width time/severity prefix
                    elif "WARNING" in line and "| WARNING |" not in line:
                        warnings.append(line[9:].strip())
                    elif '"successful run"' in line:
                        counter += 1
                    elif step == "DQHistogramMerge" and "Writing file: myHIST.root" in line:  # DQ merge hack
                        counter += 1

        if warnings:
            self.logger.info(f"{step} validation test step WARNINGS")
            warnings = list(dict.fromkeys(warnings))  # de-duplicate, preserving first-seen order
            for w in warnings:
                self.logger.info(f"  {w}")
            self.logger.info("-----------------------------------------------------")

        if errors:
            self.logger.info(f"{step} validation test step ERRORS")
            errors = list(dict.fromkeys(errors))  # de-duplicate, preserving first-seen order
            for e in errors:
                self.logger.info(f"  {e}")
            self.logger.info("-----------------------------------------------------")

        if counter and not errors:
            self.logger.info(f"{step} validation test step successful")

            # DQHistogramMerge logs are short enough to always dump in full.
            if step == "DQHistogramMerge":
                self.logger.info(f"Full {step} step log:")
                with log.open() as file:
                    for line in file:
                        self.logger.print(f"  {line.strip()}")
                self.logger.info("-----------------------------------------------------")
        else:
            result = False
            if log.exists():
                printed_trf = True  # if one step fails, next steps are expected so don't be too verbose
                self.logger.error(f"{step} validation test step failed")
                self.logger.error(f"Full {step} step log:")
                with log.open() as file:
                    for line in file:
                        self.logger.print(f"  {line.strip()}")
                self.logger.info("-----------------------------------------------------")
            else:
                self.logger.error(f"{step} validation test step did not run")
                # Dump the top-level transform log once, to explain why the
                # step never produced its own log file.
                if not printed_trf:
                    printed_trf = True
                    self.logger.error("Full transform log:")
                    with (test.validation_path / f"{test.ID}.log").open() as file:
                        for line in file:
                            self.logger.print(f"  {line.strip()}")
                    self.logger.info("-----------------------------------------------------")

        if self.setup.validation_only:
            continue  # Skip checking reference test because in this mode the clean tests have not been run

        # Same success-counting pass over the reference run's log.
        log = test.reference_path / f"log.{step}"
        counter = 0
        with log.open() as file:
            for line in file:
                if '"successful run"' in line:
                    counter += 1
                elif (step == "DQHistogramMerge"  # DQ merge hack
                      and "Writing file: myHIST.root" in line):
                    counter += 1

        if counter:
            self.logger.info(f"{step} reference test step successful")
        else:
            self.logger.error(f"{step} reference test step failed")
            result = False

    if result:
        self.logger.info(f"All {test.ID} athena steps completed successfully\n")
    else:
        self.logger.error(f"One or more {test.ID} Athena steps failed. Please investigate the cause.\n")

    return result
103
104
106 """Run Frozen Tier0 Policy Check."""
107
def __init__(self, setup: TestSetup, input_format: str, max_events: int) -> None:
    """Configure the frozen-policy check for one output format.

    ``max_events`` is stored as a string because it is interpolated directly
    into the diff-root shell command.
    """
    super().__init__(setup)
    self.max_events = str(max_events)
    self.format = input_format
    # Detailed vs. semi-detailed diff-root mode is taken from the test setup.
    self.detailed_comparison = setup.detailed_comparison
113
def run(self, test: WorkflowTest) -> bool:
    """Run the Frozen Tier0 Policy check for this check's output format.

    Compares the validation output file against the stored reference with
    ``acmd.py diff-root``, applying a per-test exclusion list or
    branches-of-interest list when one is shipped with the release or found
    under the configured diff-rules path.  Returns True when the diff log
    reports no differences ("all good" and no WARNING/ERROR lines).
    """
    self.logger.info("---------------------------------------------------------------------------------------")
    self.logger.info(f"Running {test.ID} Frozen Tier0 Policy Check on {self.format} for {self.max_events} events")

    diff_rules_path: Path = self.setup.diff_rules_path
    diff_rules_exclusion_filename: str = f"{test.ID}_{self.format}_diff-exclusion-list.txt"
    diff_rules_interest_filename: str = f"{test.ID}_{self.format}_diff-interest-list.txt"
    diff_rules_file = None

    # Derivation outputs follow a different file-naming convention.
    file_name = f"my{self.format}.pool.root"
    if test.type == WorkflowType.Derivation:
        file_name = f"{self.format}.myOutput.pool.root"
    reference_file = self.reference_file(test, file_name)
    if reference_file is None:
        self.logger.error(f"Reference file {file_name} not found")
        return False

    if self.setup.validation_only:
        # In validation-only mode the references (and diff rules) live on CVMFS.
        cvmfs_path = Path(references_CVMFS_path)
        diff_rules_path = cvmfs_path / self.setup.release_ID / test.ID

    self.logger.info(f"Reading the reference file from location {reference_file}")

    # try to get the exclusion list or the list of branches of interest
    branches_of_interest = False
    if self.setup.diff_rules_path is None:
        diff_rules_exclusion_local_path = test.validation_path / diff_rules_exclusion_filename
        diff_rules_interest_local_path = test.validation_path / diff_rules_interest_filename
        # get_files copies the rule files out of the release, if present there.
        subprocess.Popen(["/bin/bash", "-c", f"cd {test.validation_path}; get_files -remove -data {diff_rules_exclusion_filename}"], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
        subprocess.Popen(["/bin/bash", "-c", f"cd {test.validation_path}; get_files -remove -data {diff_rules_interest_filename}"], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
        if not diff_rules_exclusion_local_path.exists() and not diff_rules_interest_local_path.exists():
            self.logger.info(f"Neither '{diff_rules_exclusion_local_path}' nor '{diff_rules_interest_local_path}' files exist in the release.")
        elif diff_rules_exclusion_local_path.exists():
            diff_rules_file = diff_rules_exclusion_local_path
        elif diff_rules_interest_local_path.exists():
            diff_rules_file = diff_rules_interest_local_path
            branches_of_interest = True

    if diff_rules_file is None and diff_rules_path is not None:
        # Fall back to the diff-rules directory; the exclusion list takes
        # precedence over the branches-of-interest list.
        diff_rules_file = diff_rules_path / diff_rules_exclusion_filename
        if not diff_rules_file.exists():
            diff_rules_file = diff_rules_path / diff_rules_interest_filename
            if diff_rules_file.exists():
                branches_of_interest = True

    if diff_rules_file is not None and diff_rules_file.exists():
        self.logger.info(f"Reading the diff rules file from location {diff_rules_file}")
        diff_root_list = []
        with diff_rules_file.open() as f:
            for line in f:
                stripped_line = line.rstrip()
                # Skip blank lines and '#' comment lines.
                if stripped_line and stripped_line[0] != '#':
                    diff_root_list.append(r"'{}'".format(stripped_line))
    else:
        self.logger.info("No diff rules file exists, using the default list")
        diff_root_list = [r"'index_ref'", r"'(.*)_timings(.*)'", r"'(.*)_mems(.*)'"]

    validation_file = test.validation_path / file_name
    log_file = test.validation_path / f"diff-root-{test.ID}.{self.format}.log"
    diff_root_list = " ".join(diff_root_list)
    diff_root_mode = "--branches-of-interest" if branches_of_interest else "--ignore-leaves"

    comparison_mode = "detailed" if self.detailed_comparison else "semi-detailed"
    comparison_command = f"acmd.py diff-root {reference_file} {validation_file} --order-trees --nan-equal --exact-branches --mode {comparison_mode} --error-mode resilient {diff_root_mode} {diff_root_list} --entries {self.max_events} > {log_file} 2>&1"
    output, error = subprocess.Popen(["/bin/bash", "-c", comparison_command], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
    output, error = output.decode("utf-8"), error.decode("utf-8")

    # We want to catch/print both container additions/subtractions as well as
    # changes in these containers. `allGood_return_code` is meant to catch
    # other issues found in the diff (not expected, but just to be safe)
    passed_frozen_tier0_test = True
    all_good = False
    with log_file.open() as file:
        for line in file:
            if "WARNING" in line:  # Catches container addition/subtractions
                self.logger.error(line.strip())
                passed_frozen_tier0_test = False
            if "leaves differ" in line:  # Catches changes in branches
                self.logger.error(line.strip())
                passed_frozen_tier0_test = False
            if "ERROR" in line:  # Catches other issues (including unmatched branches)
                self.logger.error(line.strip())
                passed_frozen_tier0_test = False
            if "INFO all good." in line:
                all_good = True

    result = passed_frozen_tier0_test and all_good
    if result:
        self.logger.info("Passed!\n")
    else:
        # print CI helper directly to avoid logger decorations
        if self.setup.disable_release_setup:
            self.logger.print(f"ATLAS-CI-ADD-LABEL: {test.run.value}-{test.type.value}-output-changed")
            self.logger.print("")

        if "DAOD" in self.format:
            self.logger.error(f"Your change breaks the frozen derivation policy in test {test.ID}.")
            self.logger.error("Please make sure you explain the reason for the change and ask relevant experts for approval.")
        else:
            self.logger.error(f"Your change breaks the frozen tier0 policy in test {test.ID}.")
            self.logger.error("Please make sure this has been discussed in the correct meeting (RIG or Simulation) meeting and approved by the relevant experts.")

        # copy the artifacts
        if self.setup.disable_release_setup:
            comparison_command = f"CopyCIArtifact.sh {validation_file}"
            output, error = subprocess.Popen(["/bin/bash", "-c", comparison_command], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
            output, error = output.decode("utf-8"), error.decode("utf-8")

            if error or not output:
                self.logger.error(f"Tried copying '{validation_file}' to the CI artifacts area but it failed.")
                self.logger.error(f"  {error.strip()}")
            else:
                self.logger.error(output)

        # Dump the full diff-root log for the record.
        with log_file.open() as file:
            for line in file:
                self.logger.info(f"  {line.strip()}")
        self.logger.info("-----------------------------------------------------\n")

    return result
234
235
237 """Run metadata check."""
238
def __init__(self, setup: TestSetup, input_format: str) -> None:
    """Remember which output format this metadata check inspects."""
    super().__init__(setup)
    # Output format tag (e.g. "AOD", "DAOD_PHYS") used to build file names.
    self.format = input_format
242
def run(self, test: WorkflowTest) -> bool:
    """Compare the in-file metadata of the validation output with the reference.

    Runs ``meta-diff`` with a drop list of fields expected to legitimately
    differ between runs (GUIDs, file sizes, release numbers, ...).  Returns
    True when the diff log is empty.
    """
    self.logger.info("---------------------------------------------------------------------------------------")
    self.logger.info(f"Running {test.ID} metadata check on {self.format}")

    # Derivation outputs follow a different file-naming convention.
    file_name = f"my{self.format}.pool.root"
    if test.type == WorkflowType.Derivation:
        file_name = f"{self.format}.myOutput.pool.root"

    reference_file = self.reference_file(test, file_name)
    if reference_file is None:
        self.logger.error(f"Reference file {file_name} not found")
        return False

    self.logger.info(f"Reading the reference file from location {reference_file}")

    # Metadata fields that legitimately differ between runs/releases; the
    # derivation-specific entries are only dropped for derivation workflows.
    exclusion_list = " ".join([
        "file_guid", "file_size",
        "/TagInfo/AtlasRelease", "FileMetaData/productionRelease",
    ] + ([
        "auto_flush",
        "StreamDAOD_PHYS/eventTypes", "StreamDAOD_PHYSLITE/eventTypes",
    ] if test.type is WorkflowType.Derivation else []))

    validation_file = test.validation_path / file_name
    log_file = test.validation_path / f"meta-diff-{test.ID}.{self.format}.log"

    comparison_command = f"meta-diff --ordered -m full -x diff {reference_file} {validation_file} --drop {exclusion_list} --ignoreTrigger > {log_file} 2>&1"
    output, error = subprocess.Popen(["/bin/bash", "-c", comparison_command], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
    output, error = output.decode("utf-8"), error.decode("utf-8")

    # Any non-blank line in the log means a metadata difference was found.
    result = True
    with log_file.open() as file:
        for line in file:
            if line.strip():
                result = False

    if result:
        self.logger.info("Passed!\n")
    else:
        # print CI helper directly to avoid logger decorations
        if self.setup.disable_release_setup:
            self.logger.print(f"ATLAS-CI-ADD-LABEL: {test.run.value}-{test.type.value}-output-changed")
            self.logger.print("")

        self.logger.error(f"Your change breaks the frozen tier0 policy in test {test.ID}.")
        self.logger.error("Please make sure this has been discussed in the correct meeting (RIG or Simulation) meeting and approved by the relevant experts.")

        # copy the artifacts
        if self.setup.disable_release_setup:
            comparison_command = f"CopyCIArtifact.sh {validation_file}"
            output, error = subprocess.Popen(["/bin/bash", "-c", comparison_command], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
            output, error = output.decode("utf-8"), error.decode("utf-8")

            if error or not output:
                self.logger.error(f"Tried copying '{validation_file}' to the CI artifacts area but it failed.")
                self.logger.error(f"  {error.strip()}")
            else:
                self.logger.error(output)

        # Dump the full meta-diff log for the record.
        with log_file.open() as file:
            for line in file:
                self.logger.info(f"  {line.strip()}")
        self.logger.info("-----------------------------------------------------\n")

    return result
308
310 """Run AOD Content Check."""
311
def run(self, test: WorkflowTest) -> bool:
    """Compare the AOD container content against a reference listing.

    Extracts a sorted, de-duplicated list of container names from the AOD
    with ``acmd.py chk-file`` and diffs it against the reference listing
    (fetched from the release in validation-only mode, otherwise computed
    from the reference run's AOD).  Returns True when the listings match,
    or when no reference listing is available.
    """
    self.logger.info("---------------------------------------------------------------------------------------")
    self.logger.info(f"Running {test.ID} AOD content check")

    file_name = "myAOD.pool.root"
    output_name = f"{test.ID}_AOD_content.txt"

    validation_file = test.validation_path / file_name
    validation_output = test.validation_path / output_name
    # awk: keep only the table body between the '---' and '===' separators,
    # then select column 10 (the container name) and sort/uniq the result.
    validation_command = f"acmd.py chk-file {validation_file} | awk '/---/{{flag=1;next}}/===/{{flag=0}}flag' | awk '{{print $10}}' | LC_ALL=C sort | uniq > {validation_output}"

    output_val, error_val = subprocess.Popen(["/bin/bash", "-c", validation_command], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
    output_val, error_val = output_val.decode("utf-8"), error_val.decode("utf-8")
    if error_val:
        self.logger.error(f"Something went wrong with retrieving the content for test {test.ID}:")
        self.logger.error(error_val)

    # Read references
    if self.setup.validation_only:
        # try to get the reference from the release via get_files
        reference_path = test.validation_path
        reference_output_name = f"{test.ID}_AOD_content.ref"
        reference_output = reference_path / reference_output_name
        subprocess.Popen(["/bin/bash", "-c", f"cd {reference_path}; get_files -remove -data {reference_output_name}"], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
        if not reference_output.exists():
            self.logger.info(f"No reference file '{reference_output_name}' to compare the content with.")
            return True
    else:
        reference_path = test.reference_path
        reference_output = reference_path / output_name
        reference_file = reference_path / file_name

        reference_command = f"acmd.py chk-file {reference_file} | awk '/---/{{flag=1;next}}/===/{{flag=0}}flag' | awk '{{print $10}}' | LC_ALL=C sort | uniq > {reference_output}"
        subprocess.Popen(["/bin/bash", "-c", reference_command], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()

    # Remove HLT containers in some cases
    extra_diff_args = ""
    if test.type == WorkflowType.MCReco or test.type == WorkflowType.MCPileUpReco:
        extra_diff_args = "-I '^HLT' -I '^LVL1' -I '^L1'"

    # Compute the diff
    diff_output, diff_error = subprocess.Popen(["/bin/bash", "-c", f"diff {extra_diff_args} {reference_output} {validation_output}"], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
    diff_output, diff_error = diff_output.decode("utf-8"), diff_error.decode("utf-8")

    result = False
    if not diff_output and not diff_error:
        self.logger.info("Passed!\n")
        result = True
    else:
        # print CI helper directly to avoid logger decorations
        if self.setup.disable_release_setup:
            self.logger.print(f"ATLAS-CI-ADD-LABEL: {test.run.value}-{test.type.value}-output-changed")
            self.logger.print("")

        self.logger.error(f"Your change modifies the output in test {test.ID}.")
        self.logger.error("Please make sure this has been discussed in the correct meeting (RIG or Simulation) meeting and approved by the relevant experts.")
        if self.setup.validation_only:
            self.logger.error(f"The output '{output_name}' (>) differs from the reference '{reference_output_name}' (<):")
        else:
            self.logger.error(f"The output '{validation_output}' (>) differs from the reference '{reference_output}' (<):")
        if diff_output:
            self.logger.print("")
            self.logger.print(diff_output)
        if diff_error:
            self.logger.print(diff_error)
        self.logger.info("-----------------------------------------------------\n")

    return result
380
381
383 """Run AOD Digest Check."""
384
def __init__(self, setup: TestSetup, max_events: int = -1) -> None:
    """Configure the AOD digest check.

    ``max_events`` defaults to -1; it is kept as a string because it is
    interpolated into shell commands.
    """
    super().__init__(setup)
    self.max_events = str(max_events)
388
def run(self, test: WorkflowTest) -> bool:
    """Compare the xAOD digest of the validation AOD against the reference.

    Computes the digest with ``xAODDigest.py`` and diffs it against the
    reference digest (fetched from the release in validation-only mode,
    otherwise computed from the reference run's AOD).  Returns True when
    the digests match, or when no reference digest is available.
    """
    self.logger.info("---------------------------------------------------------------------------------------")
    self.logger.info(f"Running {test.ID} AOD digest")

    file_name = "myAOD.pool.root"
    output_name = f"{test.ID}_AOD_digest.txt"

    validation_file = test.validation_path / file_name
    validation_output = test.validation_path / output_name
    validation_log_file = test.validation_path / f"AODdigest-{test.ID}.log"
    validation_command = f"xAODDigest.py {validation_file} {validation_output} > {validation_log_file} 2>&1"

    output_val, error_val = subprocess.Popen(["/bin/bash", "-c", validation_command], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
    output_val, error_val = output_val.decode("utf-8"), error_val.decode("utf-8")
    if error_val:
        self.logger.error(f"Something went wrong with the digest calculation for test {test.ID}:")
        self.logger.error(error_val)

    # Read references
    if self.setup.validation_only:
        # try to get the reference from the release via get_files
        reference_path = test.validation_path
        reference_output_name = f"{test.ID}_AOD_digest.ref"
        reference_output = reference_path / reference_output_name
        subprocess.Popen(["/bin/bash", "-c", f"cd {reference_path}; get_files -remove -data {reference_output_name}"], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
        if not reference_output.exists():
            # Nothing to compare with: report the digest and accept the test.
            self.logger.info(f"No reference file '{reference_output_name}' to compare the digest with. Printing the full digest:")
            with validation_output.open() as f:
                for line in f:
                    self.logger.print(f"  {line.strip()}")
            return True
    else:
        reference_path = test.reference_path
        reference_output = reference_path / output_name
        reference_file = reference_path / file_name
        reference_log_file = test.reference_path / f"AODdigest-{test.ID}.log"

        reference_command = f"xAODDigest.py {reference_file} {reference_output} > {reference_log_file} 2>&1"
        subprocess.Popen(["/bin/bash", "-c", reference_command], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()

    # Compute the diff
    diff_output, diff_error = subprocess.Popen(["/bin/bash", "-c", f"diff {reference_output} {validation_output}"], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
    diff_output, diff_error = diff_output.decode("utf-8"), diff_error.decode("utf-8")

    result = False
    if not diff_output and not diff_error:
        self.logger.info("Passed!\n")
        result = True
    else:
        # print CI helper directly to avoid logger decorations
        if self.setup.disable_release_setup:
            self.logger.print(f"ATLAS-CI-ADD-LABEL: {test.run.value}-{test.type.value}-output-changed")
            self.logger.print("")

        self.logger.error(f"Your change breaks the digest in test {test.ID}.")
        self.logger.error("Please make sure this has been discussed in the correct meeting (RIG or Simulation) meeting and approved by the relevant experts.")
        if self.setup.validation_only:
            self.logger.error(f"The output '{output_name}' (>) differs from the reference '{reference_output_name}' (<):")
        else:
            self.logger.error(f"The output '{validation_output}' (>) differs from the reference '{reference_output}' (<):")
        if diff_output:
            # Print the digest header line first so the diff columns are readable.
            with reference_output.open() as file:
                self.logger.print(file.readline())
            self.logger.print(diff_output)
        if diff_error:
            self.logger.print(diff_error)
        self.logger.info("-----------------------------------------------------\n")

    return result
458
459
461 """Run A Very Simple Check."""
462
def __init__(self, setup: TestSetup, name: str, quantity: str, unit: str, field: int, threshold: float):
    """Configure a simple numeric log-quantity comparison."""
    super().__init__(setup)
    # Human-readable name of the check and the log-line marker it looks for.
    self.name, self.quantity = name, quantity
    self.unit = unit
    self.field = field          # whitespace-separated token index holding the value
    self.threshold = threshold  # allowed fractional deviation from the reference
470
def run(self, test: "WorkflowTest") -> bool:
    """Compare a numeric quantity between reference and validation logs.

    For every step of *test*, the first log line containing ``self.quantity``
    is located and the whitespace-separated token at index ``self.field`` is
    parsed as a float.  An increase beyond ``self.threshold`` (fractional)
    fails the check; a decrease beyond the threshold only emits warnings.

    Returns False if any step is missing the quantity in either log, or if
    any step's value grew past the threshold.

    Bug fix: the warning branch no longer resets ``result`` to True, so a
    failure recorded by an earlier step can no longer be masked by a
    later step that merely improved.
    """
    self.logger.info("-----------------------------------------------------")
    self.logger.info(f"Running {test.ID} {self.name} Check")

    def _read_value(log_path):
        # Return the parsed value from the first matching line, or None when
        # the quantity never appears in the log.
        with log_path.open() as file:
            for line in file:
                if self.quantity in line:
                    return float(line.split()[self.field])
        return None

    result = True
    for step in test.steps:
        log_name = f"log.{step}"
        reference_log = test.reference_path / log_name
        validation_log = test.validation_path / log_name

        reference_value = _read_value(reference_log)
        if reference_value is None:
            self.logger.error(f"No data available in {reference_log}. Job failed.")
            return False

        validation_value = _read_value(validation_log)
        if validation_value is None:
            self.logger.error(f"No data available in {validation_log}. Job failed.")
            return False

        if reference_value != 0:
            factor = validation_value / reference_value

            # Error if the factor increases (very bad things)
            # Warning if the factor decreases (should be an understood feature)
            if factor > 1. + self.threshold:
                self.logger.error(f"{self.quantity} in the {step} step with(out) your change is {validation_value} ({reference_value}) {self.unit}")
                self.logger.error(f"Your change changes {self.quantity} by a factor {factor}")
                self.logger.error("Is this an expected outcome of your change(s)?")
                result = False
                self.logger.error(f"{step}: {self.name}")
                self.logger.error(f"ref {reference_value} {self.unit}")
                self.logger.error(f"val {validation_value} {self.unit}")
            elif factor < 1. - self.threshold:
                self.logger.warning(f"{self.quantity} in the {step} step with(out) your change is {validation_value} ({reference_value}) {self.unit}")
                self.logger.warning(f"Your change changes {self.quantity} by a factor {factor}")
                self.logger.warning("Is this an expected outcome of your change(s)?")
                # NOTE: deliberately do NOT reset `result` here -- an
                # improvement in this step must not hide an earlier failure.
                self.logger.warning(f"{step}: {self.name}")
                self.logger.warning(f"ref {reference_value} {self.unit}")
                self.logger.warning(f"val {validation_value} {self.unit}")

    if result:
        self.logger.info("Passed!\n")
    else:
        self.logger.error("Failed!\n")

    return result
533
534
536 """Run WARNINGS check."""
537
def run(self, test: WorkflowTest):
    """Compare WARNING counts between reference and validation logs.

    For every step of *test*, the warnings of both logs are counted via the
    ``warnings_count`` helper.  New warnings fail the check; removed warnings
    are reported and accepted.  Messages are compared with their first nine
    characters (the time/severity prefix) stripped.

    Returns True only if no step introduced additional warnings.

    Bug fix: the fewer-warnings and equal-warnings branches no longer reset
    ``result`` to True, so a failure recorded by an earlier step can no
    longer be masked by a later, cleaner step.
    """
    self.logger.info("-----------------------------------------------------")
    self.logger.info(f"Running {test.ID} WARNINGS Check\n")

    result = True
    for step in test.steps:
        log_name = f"log.{step}"
        reference_log = test.reference_path / log_name
        validation_log = test.validation_path / log_name
        warnings_reference = warnings_count(reference_log)
        warnings_validation = warnings_count(validation_log)

        # Strip the fixed-width prefix so messages compare across runs.
        stripped_reference = [w[9:] for w in warnings_reference]
        stripped_validation = [w[9:] for w in warnings_validation]

        new_warnings = list(set(stripped_validation) - set(stripped_reference))
        removed_warnings = list(set(stripped_reference) - set(stripped_validation))

        if len(warnings_validation) > len(warnings_reference):
            self.logger.error(f"Validation log file {validation_log} has {len(warnings_validation) - len(warnings_reference)} more warning(s) than the reference log file {reference_log}")
            self.logger.error("Please remove the new warning message(s):")
            for w in new_warnings:
                self.logger.error(w)
            result = False
        elif len(warnings_validation) < len(warnings_reference):
            self.logger.info(f"Validation log file {validation_log} has {len(warnings_reference) - len(warnings_validation)} less warnings than the reference log file {reference_log}")
            self.logger.info("The reduction of unnecessary WARNINGs is much appreciated. Is it expected?")
            self.logger.info("The following warning messages have been removed:")
            for w in removed_warnings:
                self.logger.info(w)
            # NOTE: do not reset `result` -- an earlier failing step must
            # still fail the whole check.
        else:
            self.logger.info(f"Validation log file {validation_log} has the same number of warnings as the reference log file {reference_log}")

    if result:
        self.logger.info("Passed!\n")
    else:
        self.logger.error("Failed!\n")

    return result
584
585
587 """Run FPE check."""
588
# Ignore FPEs for these tests:
ignoreTestRuns = []  # runs whose FPEs are downgraded to warnings (currently none)
ignoreTestTypes = [WorkflowType.FullSim, WorkflowType.AF3]  # workflow types exempted from FPE failures
ignoreTestIDs = ['x686']  # individual test IDs exempted from FPE failures
593
def run(self, test: WorkflowTest):
    """Scan each step's validation log for floating-point exceptions.

    Counts "WARNING FPE" lines per reporting algorithm (the bracketed token
    nearest the end of the line) and records the first stack trace seen for
    each algorithm.  Any FPE fails the check, unless the test's run, type or
    ID is in the ignore lists, in which case the findings are logged as
    warnings and the check passes.
    """
    self.logger.info("-----------------------------------------------------")
    self.logger.info(f"Running {test.ID} FPE Check")

    result = True
    for step in test.steps:
        log = test.validation_path / f"log.{step}"
        fpes = {}          # algorithm name -> occurrence count
        stack_traces = {}  # algorithm name -> first stack trace (list of lines)
        with log.open() as file:
            last_stack_trace = None
            for line in file:
                if "WARNING FPE" in line:
                    last_stack_trace = None
                    fpe = None
                    # The algorithm name is the last bracketed token on the line.
                    for part in reversed(line.split()):
                        if "[" in part:
                            fpe = part.strip().replace("[", "").replace("]", "")
                            break
                    if fpe:
                        if fpe in fpes:
                            fpes[fpe] += 1
                        else:
                            # First occurrence: start collecting its stack trace.
                            fpes[fpe] = 1
                            last_stack_trace = []
                            stack_traces[fpe] = last_stack_trace
                elif "FPE stacktrace" in line and last_stack_trace is not None:
                    # The trace content is on the following line; strip its prefix.
                    line = next(file)
                    last_stack_trace.append(line.strip()[9:])

        if fpes.keys():
            # Downgrade to WARNING for tests on the ignore lists.
            msgLvl = logging.WARNING if test.run in self.ignoreTestRuns or test.type in self.ignoreTestTypes or test.ID in self.ignoreTestIDs else logging.ERROR
            result = False
            self.logger.log(msgLvl, f" {step} validation test step FPEs")
            for fpe, count in sorted(fpes.items(), key=lambda item: item[1]):
                self.logger.log(msgLvl, f"{count:>5}  {fpe}")
            for fpe in fpes.keys():
                self.logger.log(msgLvl, "-----------------------------------------------------")
                self.logger.log(msgLvl, f" first stack trace for algorithm {fpe}:")
                for line in stack_traces[fpe]:
                    self.logger.log(msgLvl, line)
                self.logger.log(msgLvl, "-----------------------------------------------------")

    if result:
        self.logger.info("Passed!\n")
    elif test.run in self.ignoreTestRuns or test.type in self.ignoreTestTypes or test.ID in self.ignoreTestIDs:
        self.logger.warning("Failed!")
        self.logger.warning("Check disabled due to irreproducibilities!\n")
        result = True
    else:
        self.logger.error("Failed!\n")

    return result
void print(char *figname, TCanvas *c1)
bool run(self, WorkflowTest test)
Definition Checks.py:312
None __init__(self, TestSetup setup, int max_events=-1)
Definition Checks.py:385
bool run(self, WorkflowTest test)
Definition Checks.py:389
run(self, WorkflowTest test)
Definition Checks.py:594
bool run(self, WorkflowTest test)
Definition Checks.py:14
bool run(self, WorkflowTest test)
Definition Checks.py:114
None __init__(self, TestSetup setup, str input_format, int max_events)
Definition Checks.py:108
bool run(self, WorkflowTest test)
Definition Checks.py:243
None __init__(self, TestSetup setup, str input_format)
Definition Checks.py:239
__init__(self, TestSetup setup, str name, str quantity, str unit, int field, float threshold)
Definition Checks.py:463
bool run(self, WorkflowTest test)
Definition Checks.py:471
run(self, WorkflowTest test)
Definition Checks.py:538
Optional[Path] reference_file(self, "WorkflowTest" test, str file_name)
STL class.
std::string replace(std::string s, const std::string &s2, const std::string &s3)
Definition hcg.cxx:310