ATLAS Offline Software
beamspotman.py
1 #!/usr/bin/env python
2 
3 # Copyright (C) 2002-2025 CERN for the benefit of the ATLAS collaboration
4 
5 
6 """
7 beamspotman is a command line utility to do typical beam spot related tasks.
8 """
9 __authors__ = ['Juerg Beringer', 'Carl Suster']
10 __version__ = 'beamspotman.py atlas/athena'
11 __usage__ = '''%prog [options] command [args ...]
12 
13 Commands are:
14 
15 show RUNNR                                For given run, show data on CAF and beam spot jobs
16 run RUNNR TAG                              Run standard beam spot reconstruction job
17 runCmd DSNAME TASKNAME CMD                 Repeat command for matching tasks (use %(DSNAME)s and
18                                              %(TASKNAME)s in the command string CMD to specify the
19                                              dataset and task names, if needed)
20 runMon RUNNR TAG                           Run standard beam spot monitoring job
21 runMonJobs [MAXTIME]                       Run monitoring jobs for jobs completed at most MAXTIME ago
22 postproc                                   Default postprocessing for any task that needs it
23 postproc DSNAME TASKNAME [WHATLIST]        Postprocessing for a selected set of tasks
24 upload DBFILE                              Upload SQLite file into COOL (independent of task)
25 upload DSNAME TASKNAME                     Upload result of beam spot determination into COOL
26 dq2get DSNAME TASKNAME                     Retrieve task data from grid job (must have set up grid env.)
27 queryT0 DSNAME TASKNAME                    Query Tier-0 database about a task
28 backup DIR                                 Backup directory DIR (may contain wildcards) to EOS
29 archive DSNAME TASKNAME                    Archive task data (deletes files after copying to EOS,
30                                              sets on-disk status to ARCHIVED)
31 lsbackup                                   List contents of backup directory on EOS
32 reproc TEMPLATE DSNAME TASKNAME INPUTDIR   Run reprocessing task consisting of several jobs split into 5 LBs
33                                              over the files in INPUTDIR
34 runaod TEMPLATE DSNAME TASKNAME INPUTDIR   Run over AOD, splitting jobs into sets of N LBs (similar
35                                              to reproc command, except for variable params)
36 resubmit DSNAME TASKNAME                   Rerun jobs of a specific task (choose queue with -q QUEUE)
37 dqflag DBFILE                              Upload DQ SQLite file into COOL (independent of task)
38 dqflag DSNAME TASKNAME                     Upload result of beam spot DQ flag determination into COOL
39 runBCID RUNNR TAG                          Run standard beam spot BCID job
40 runBCIDJobs [MAXTIME]                      Run BCID jobs for jobs completed at most MAXTIME ago
41 mctag STATUS POSX POSY POSZ                Create an sqlite file containing a MC tag with the
42       SIGMAX SIGMAY SIGMAZ                   parameters given (and zero errors). A la beamSpot_set.py.
43       [TILTX TILTY SIGMAXY]
44 
45 
46 '''
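# Illustrative examples (the run number, processing tag and COOL tag below are
# placeholders, not recommendations):
#   beamspotman.py show 358031
#   beamspotman.py -t IndetBeampos-ES1-UPD2 run 358031 f961
#   beamspotman.py -t IndetBeampos-ES1-UPD2 runMon 358031 f961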
47 
48 proddir = '/afs/cern.ch/user/a/atlidbs/jobs'
49 produserfile = '/afs/cern.ch/user/a/atlidbs/private/produsers.dat'
50 prodcoolpasswdfile = '/afs/cern.ch/user/a/atlidbs/private/coolinfo.dat'
51 flaskcoolpasswdfile = '/afs/cern.ch/user/a/atlidbs/private/flaskinfo.dat'
52 proddqcoolpasswdfile = '/afs/cern.ch/user/a/atlidbs/private/cooldqinfo.dat'
53 tier0dbinfofile = '/afs/cern.ch/user/a/atlidbs/private/t0dbinfo.dat'
54 beamspottag = ''
55 backuppath = '/eos/atlas/atlascerngroupdisk/phys-beamspot/jobs/backup'
56 archivepath = '/eos/atlas/atlascerngroupdisk/phys-beamspot/jobs/archive'
57 
58 import sys, os, stat, glob   # glob is used below by the upload/backup/archive/dqflag commands
59 import re
60 import subprocess
61 import time
62 from copy import copy
63 
64 from InDetBeamSpotExample.TaskManager import *   # TaskManager, getFullTaskNames, DbParam, appendUnique, getKey, getJobConfig
65 from InDetBeamSpotExample.PostProcessing import doPostProcessing
66 from InDetBeamSpotExample import BeamSpotPostProcessing
67 from InDetBeamSpotExample import COOLUtils
68 from InDetBeamSpotExample import DiskUtils
69 
70 
71 from optparse import Option, OptionParser, OptionGroup
72 def check_commsep(option, opt, value):
73  return re.split(r'\s*,\s*|\s+', value)
74 class BeamSpotOption(Option):
75  TYPES = Option.TYPES + ('commsep',)
76  TYPE_CHECKER = copy(Option.TYPE_CHECKER)
77  TYPE_CHECKER['commsep'] = check_commsep
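# The 'commsep' option type splits its value on commas and/or whitespace, so
# (with illustrative step names) --postprocsteps 'PlotBeamSpot, LinkResults'
# yields ['PlotBeamSpot', 'LinkResults']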
78 
79 parser = OptionParser(usage=__usage__, version=__version__, option_class=BeamSpotOption)
80 
81 g_input = OptionGroup(parser, 'Input file options')
82 g_input.add_option('', '--eos', dest='eos', default=False, action='store_true', help='access files over EOS (not needed if /eos is mounted)')
83 g_input.add_option('-e', '--eospath', dest='eospath', default='/eos/atlas/atlastier0/rucio', help='eos path (excluding project and stream name)')
84 g_input.add_option('-p', '--project', dest='project', default='data17_13TeV', help='project name')
85 g_input.add_option('-s', '--stream', dest='stream', default='calibration_BeamSpot', help='stream name')
86 g_input.add_option('-f', '--filter', dest='filter', default=r'.*\.AOD\..*', help='regular expression to filter input files')
87 g_input.add_option('', '--lbfilemap', dest='lbfilemap', default='', help='text file with mapping between filename and lumi blocks')
88 g_input.add_option('', '--rucio', dest='rucio', action='store_true', default=False, help='rucio directory structure')
89 g_input.add_option('', '--dpdinput', dest='dpdinput', action='store_true', default=False, help='Run over DPD for runaod')
90 g_input.add_option('-l', '--filelist', dest='filelist', default=None, help='Explicit list of files for reproc command')
91 g_input.add_option('', '--removedups', dest='removedups', action='store_true', default=False, help='Remove duplicate retry files of form root.N, keeping the latest')
92 parser.add_option_group(g_input)
93 
94 g_mode = OptionGroup(parser, 'Mode flags')
95 g_mode.add_option('-b', '--batch', dest='batch', action='store_true', default=False, help='batch mode - never ask for confirmation')
96 g_mode.add_option('', '--test', dest='testonly', action='store_true', default=False, help='for runaod, show only options and input files')
97 g_mode.add_option('', '--expertmode', dest='expertmode', action='store_true', default=False, help='expert mode (BE VERY CAREFUL)')
98 g_mode.add_option('', '--ignoremode', dest='ignoremode', default='', help='ignore update protection mode (needs expert pwd)')
99 parser.add_option_group(g_mode)
100 
101 # Other options:
102 parser.add_option('-t', '--beamspottag', dest='beamspottag', default=beamspottag, help='beam spot tag')
103 parser.add_option('-d', '--dbconn', dest='dbconn', default='', help='task manager database connection string (default: check TASKDB, otherwise use sqlite_file:taskdata.db)')
104 parser.add_option('', '--proddir', dest='proddir', default=proddir, help='production directory (default: %s)' % proddir)
105 parser.add_option('', '--destdbname', dest='destdbname', default='CONDBR2', help='destination database instance name (default: CONDBR2)')
106 parser.add_option('', '--srcdbname', dest='srcdbname', default='BEAMSPOT', help='source database instance name (default: BEAMSPOT)')
107 parser.add_option('', '--srctag', dest='srctag', default='nominal', help='source tag (default: nominal)')
108 parser.add_option('-r', '--runjoboptions', dest='runjoboptions', default='InDetBeamSpotExample/VertexTemplate.py', help='template to run beam spot jobs')
109 parser.add_option('', '--runtaskname', dest='runtaskname', default='VTX', help='task name')
110 parser.add_option('-m', '--monjoboptions', dest='monjoboptions', default='runBeamSpotMonitor.py', help='template to run monitoring jobs')
111 parser.add_option('', '--bcidjoboptions', dest='bcidjoboptions', default='InDetBeamSpotExample/VertexTemplate.py', help='template to run BCID jobs')
112 parser.add_option('', '--montaskname', dest='montaskname', default='MON', help='task name')
113 parser.add_option('', '--bcidtaskname', dest='bcidtaskname', default='BCID', help='BCID task name')
114 parser.add_option('-g', '--griduser', dest='griduser', default='user10.JuergBeringer', help='grid user name prefix (e.g. user10.JuergBeringer)')
115 parser.add_option('-n', '--nowildcards', dest='nowildcards', action='store_true', default=False, help='do not add wildcards when looking up dataset and task names')
116 parser.add_option('-x', '--excludeiftask', dest='excludeiftask', default='', help='Exclude running cmd with runCmd if such task exists already')
117 parser.add_option('', '--excludeds', dest='excludeds', default='', help='Exclude running cmd with runCmd for dataset containing a certain string')
118 parser.add_option('', '--archive', dest='archive', action='store_true', default=False, help='archive, ie delete data after backup')
119 parser.add_option('', '--incremental', dest='incremental', action='store_true', default=False, help='incremental backup')
120 parser.add_option('', '--lbperjob', dest='lbperjob', type='int', default=5, help='number of luminosity blocks per job (default: 5)')
121 parser.add_option('', '--params', dest='params', default='', help='job option parameters to pass to job option template')
122 parser.add_option('-z', '--postprocsteps', dest='postprocsteps', type='commsep', default=['JobPostProcessing'], help='Task-level postprocessing steps, comma-separated (Default: JobPostProcessing)')
123 parser.add_option('', '--srcdqtag', dest='srcdqtag', default='nominal', help='source DQ tag (default: nominal)')
124 parser.add_option('', '--dqtag', dest='dqtag', default='HEAD', help='beam spot DQ tag')
125 parser.add_option('', '--destdqdbname', dest='destdqdbname', default='CONDBR2', help='DQ destination database instance name (default: CONDBR2)')
126 parser.add_option('', '--srcdqdbname', dest='srcdqdbname', default='IDBSDQ', help='DQ source database instance name (default: IDBSDQ)')
127 parser.add_option('', '--dslist', dest='dslist', default='', help='Exclude running cmd with runCmd if the dataset is not in this file')
128 parser.add_option('', '--nominbias', dest='nominbias', action='store_true', default=False, help='overwrite physics_MinBias in DSNAME with express_express')
129 parser.add_option('', '--resultsondisk', dest='resultsondisk', action='store_true', default=False, help='Leave the results on disk when archiving')
130 parser.add_option('', '--pLbFile', dest='pseudoLbFile', default=None, help='File for pseudo LB info from scan')
131 parser.add_option('', '--prefix', dest='prefix', default='', help='Prefix for reading files from mass storage (default: determine from filename)')
132 parser.add_option('', '--rl', dest='runMin', type='int', default=None, help='Minimum run number for mctag (inclusive)')
133 parser.add_option('', '--ru', dest='runMax', type='int', default=None, help='Maximum run number for mctag (inclusive)')
134 parser.add_option('', '--useRun', dest='useRun', type='int', default=None, help='Run monitoring job for a given run only')
135 parser.add_option('', '--noCheckAcqFlag', dest='noCheckAcqFlag', action='store_true', default=False, help='Don\'t check acqFlag when submitting VdM jobs')
136 parser.add_option('', '--resubAll', dest='resubAll', action='store_true', default=False, help='Resubmit all jobs irrespective of status')
137 parser.add_option('', '--resubRunning', dest='resubRunning', action='store_true', default=False, help='Resubmit all "running" jobs')
138 parser.add_option('-q', '--queue', dest='batch_queue', default=None, help='Name of batch queue to use (default is context-specific)')
139 
140 g_deprecated = OptionGroup(parser, 'Deprecated Options')
141 g_deprecated.add_option('', '--mon', dest='legacy_mon', action='store_true', default=False, help='mon directory structure (now inferred from montaskname)')
142 parser.add_option_group(g_deprecated)
143 
144 (options, args) = parser.parse_args()
145 if len(args) < 1: parser.error('wrong number of command line arguments')
146 cmd = args[0]
147 cmdargs = args[1:]
148 
149 # General error checking (skipped in expert mode to allow testing)
150 if not options.expertmode:
151  if os.path.realpath(os.getcwd()) != os.path.realpath(options.proddir):
152  sys.exit('ERROR: You must run this command in the production directory %s' % options.proddir)
153  if not os.path.exists(produserfile):
154  sys.exit('ERROR: Authorization file unreadable or does not exist: %s' % produserfile)
155  if not subprocess.getoutput('grep `whoami` %s' % produserfile):
156  sys.exit('ERROR: You are not authorized to run this command (user name must be listed in produser file %s)' % produserfile)
157 else:
158  if os.path.realpath(os.getcwd()) != os.path.realpath(options.proddir):
159  print ('WARNING: You are not running in the production directory %s' % options.proddir)
160 
161 
162 #
163 # Utilities
164 #
165 def getT0DbConnection():
166  try:
167  with open(tier0dbinfofile, 'r') as dbinfofile:
168  connstring = dbinfofile.read().strip()
169  except:
170  sys.exit('ERROR: Unable to read connection information for Tier-0 database')
171  dbtype, dbname = connstring.split(':',1)
172  if dbtype!='oracle':
173  sys.exit('ERROR: Invalid T0 connection string')
174  import cx_Oracle
175  try:
176  oracle = cx_Oracle.connect(dbname)
177  except:
178  print ('ERROR: First connection attempt to Tier-0 Oracle database failed; will retry in 10s ...')
179  try:
180  time.sleep(10)
181  oracle = cx_Oracle.connect(dbname)
182  except Exception as e:
183  print (e)
184  sys.exit('ERROR: Unable to connect to Tier-0 Oracle database')
185  if not oracle:
186  sys.exit('ERROR: Unable to connect to Tier-0 Oracle database (invalid cx_Oracle connection)')
187  return oracle
188 
189 def getTaskManager():
190  ''' Open task manager (used by all following commands, at the very least through subcommands) '''
191  try:
192  return TaskManager(options.dbconn)
193  except:
194  print ('ERROR: Unable to access task manager database %s' % options.dbconn)
195  sys.exit(1)
196 
197 def fail(message):
198  print()
199  print ('ERROR:', message)
200  sys.exit(1)
201 
201 
202 def dataset_from_run_and_tag(run, tag):
203  ''' Given a run number and tag, check input dataset and work out name. '''
204  fs = DiskUtils.FileSet.from_ds_info(run,
205  project=options.project,
206  stream=options.stream,
207  base=options.eospath)
208  datasets = list(fs
209  .strict_mode()
210  .use_files_from(options.filelist)
211  .matching(options.filter + tag + '.*')
212  .excluding(r'.*\.TMP\.log.*')
213  .only_single_dataset())
214  dataset = os.path.dirname(datasets[0])
215  dsname = '.'.join(os.path.basename(datasets[0]).split('.')[:3])
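 # e.g. a file 'data17_13TeV.00358031.calibration_BeamSpot.recon.AOD.f961._0001.1' (illustrative name)
 # gives dsname 'data17_13TeV.00358031.calibration_BeamSpot', and dataset is its parent directory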
216  return (dataset, dsname)
217 
218 def run_jobs(script, ds_name, task_name, params, *args):
219  ''' Invoke runJobs.py '''
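 # Builds and runs a command line of the form (illustrative values):
 #   runJobs --files-per-job 0 --match '.*AOD.*' --params 'LumiRange=5' <template.py> <DSNAME> <TASKNAME>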
220  arg_list = ['runJobs']
221  arg_list.extend(map(str, args))
222  if params:
223  param_args = []
224  for k,v in params.items():
225  param_args.append("{}={}".format(k,repr(v)))
226  arg_list.extend(['--params', ', '.join(param_args)])
227  if options.testonly:
228  arg_list.append('--test')
229  arg_list.extend([script, ds_name, task_name])
230 
231  print (subprocess.list2cmdline(arg_list))
232  subprocess.check_call(arg_list)
233 
234 #
235 # Upload any SQLite file to COOL (independent of task, w/o book keeping)
236 #
237 if cmd == 'upload' and len(cmdargs) == 1:
238  dbfile = args[1]
239  if not options.beamspottag:
240  fail('No beam spot tag specified')
241  try:
242  with open(prodcoolpasswdfile, 'r') as passwdfile:
243  passwd = passwdfile.read().strip()
244  except:
245  fail('Unable to determine COOL upload password')
246 
247  try:
248  with open(flaskcoolpasswdfile, 'r') as flaskpasswdfile:
249  flaskpasswd = flaskpasswdfile.read().strip()
250  except:
251  fail('Unable to determine FLASK upload password')
252 
253  print()
254  print ('Beam spot file: ', dbfile)
255  print ('Uploading to tag: ', options.beamspottag)
256  os.system('dumpBeamSpot.py -d %s -t %s %s' % (
257  options.srcdbname,
258  options.srctag,
259  dbfile))
260 
261  print()
262  stat = os.system('/afs/cern.ch/user/a/atlcond/utilsproxy/AtlCoolMerge.py --flask --nobackup --client_id cool-flask-beamspot-client --client_secret %s --nomail %s %s --folder /Indet/Beampos --tag %s --retag %s --destdb %s %s %s ATONR_COOLOFL_GPN ATLAS_COOLOFL_INDET_W %s' % (
263  flaskpasswd,
264  '--batch' if options.batch else '',
265  ('--ignoremode %s' % options.ignoremode) if options.ignoremode else '',
266  options.srctag,
267  options.beamspottag,
268  options.destdbname,
269  dbfile,
270  options.srcdbname,
271  passwd))
272 
273  if stat: fail("UPLOADING TO COOL FAILED - PLEASE CHECK CAREFULLY!")
274  sys.exit(0)
275 
276 #
277 # Run beam spot reconstruction jobs
278 #
279 if cmd=='run' and len(args)==3:
280  run = args[1]
281  tag = args[2]
282  dataset, dsname = dataset_from_run_and_tag(run, tag)
283 
284  run_jobs(options.runjoboptions, dsname,
285  '{}-LR5.{}'.format(options.runtaskname, tag),
286  {
287  'LumiRange' : 5,
288  },
289  '--files-per-job', 0,
290  '--match', options.filter,
291  '--exclude', r'.*\.TMP\.log.*',
292  '--directory', dataset)
293  sys.exit(0)
294 
295 
296 #
297 # Run beam spot monitoring job
298 #
299 if cmd=='runMon' and len(args)==3:
300  run = args[1]
301  tag = args[2]
302 
303  if not options.beamspottag:
304  fail('No beam spot tag specified')
305 
306  dataset, dsname = dataset_from_run_and_tag(run, tag)
307 
308  # NOTE: The command below may be executed via a cron job, so we need to set STAGE_SVCCLASS
309  # explicitly in all cases, since it may not be inherited from the environment.
310  if 'ESD' in options.filter:
311  # NOTE: We pass along the filter setting, but currently we can do --lbperjob only for ESD since for
312  # other data sets we have only the merged files.
313  run_jobs(options.monjoboptions, dsname,
314  '{}.{}'.format(options.montaskname, tag),
315  {
316  'cmdjobpreprocessing' : 'export STAGE_SVCCLASS=atlcal; export ATLAS_LOCAL_ROOT_BASE=/cvmfs/atlas.cern.ch/repo/ATLASLocalRootBase',
317  'useBeamSpot' : True,
318  'beamspottag' : options.beamspottag,
319  },
320  '--lbperjob', 10,
321  '--match', options.filter,
322  '--exclude', r'.*\.TMP\.log.*',
323  '--directory', dataset)
324  elif options.filelist != None:
325  lbinfoinfiles = True
326  for line in open(options.filelist, 'r'):
327  if "lb" not in line:
328  lbinfoinfiles = False
329  break
330  lboptions='--lbperjob=10' if lbinfoinfiles else '--files-per-job=10'
331  run_jobs(options.monjoboptions, dsname, '{}.{}'.format(options.montaskname, tag),
332  {
333  'tracksAlreadyOnBeamLine' : True,
334  'useBeamSpot' : True,
335  'beamspottag' : options.beamspottag,
336  },
337  lboptions,
338  '--match', options.filter,
339  '--exclude', r'.*\.TMP\.log.*',
340  '--directory', dataset,
341  '--queue', '"tomorrow"')
342  else:
343  queue = options.batch_queue or '"tomorrow"'
344  print('Queue: ', queue )
345 
346  params = {
347  'cmdjobpreprocessing' : 'export STAGE_SVCCLASS=atlcal; export ATLAS_LOCAL_ROOT_BASE=/cvmfs/atlas.cern.ch/repo/ATLASLocalRootBase',
348  'useBeamSpot' : True,
349  'beamspottag' : options.beamspottag
350  }
351 
352  # Additional job parameters
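 # e.g. --params 'LumiRange=10, useBeamSpot=True' - each value is eval'd as Python (values illustrative)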
353  for s in options.params.split(', '):
354  if s:
355  try:
356  p = s.split('=',1)
357  params[p[0].strip()] = eval(p[1].strip())
358  except:
359  print ('\nERROR parsing user parameter',p,'- parameter will be ignored')
360 
361  run_jobs(options.monjoboptions, dsname, '{}.{}'.format(options.montaskname, tag),params,
362  '--lbperjob', 20,
363  '--match', options.filter,
364  '--exclude', r'.*\.TMP\.log.*',
365  '--directory', dataset,
366  '--queue', queue)
367 
368  sys.exit(0)
369 
370 
371 #
372 # Backup to EOS
373 #
374 if cmd=='backup' and len(args)==2:
375  if options.archive and options.incremental:
376  sys.exit('ERROR: Cannot do incremental archiving')
377  tmplist = glob.glob(args[1]+'/')
378  dirlist = []
379  for dslash in tmplist:
380  d = dslash[:-1] # Remove trailing /
381  if options.incremental:
382  baklog = d+'/backup.log'
383  if os.path.exists(baklog):
384  cmd = 'find %s -maxdepth 1 -newer %s' % (d,baklog)
385  (status,output) = subprocess.getstatusoutput(cmd)
386  # NOTE: subprocess.getstatusoutput already returns the decoded exit code in Python 3 (no '>> 8' needed)
387  if status:
388  sys.exit("ERROR: Error executing %s" % cmd)
389  if output:
390  dirlist.append(d)
391  else:
392  dirlist.append(d)
393  pass
394  else:
395  dirlist.append(d)
396  print ('\nFound %i directories for backup:\n' % len(dirlist))
397  for d in dirlist:
398  print (' %s' % d)
399  if options.archive:
400  print ('\n****************************************************************')
401  print ('WARNING: ARCHIVING - DIRECTORIES WILL BE DELETED AFTER BACKUP!!!')
402  print ('****************************************************************')
403  if not options.batch:
404  a = input('\nARE YOU SURE [n] ? ')
405  if a!='y':
406  sys.exit('ERROR: Aborted by user')
407  for d in dirlist:
408  tmpdir = '/tmp'
409  if options.archive:
410  outname = d.replace('/','-')+time.strftime('-%G_%m_%d.tar.gz')
411  path = archivepath
412  else:
413  outname = d.replace('/','-')+'.tar.gz'
414  path = backuppath
415 
416  print ('\nBacking up %s --> %s/%s ...\n' % (d,path,outname))
417 
418  status = os.system('tar czf %s/%s %s' % (tmpdir,outname,d)) >> 8
419  if status:
420  sys.exit('\nERROR: Unable to create local tar file %s/%s' % (tmpdir,outname))
421 
422  status = os.system('xrdcp -f %s/%s root://eosatlas.cern.ch/%s/%s' % (tmpdir,outname,path,outname)) >> 8
423 
424  if status:
425  os.system('rm -rf %s/%s' % (path,outname) )
426  status = os.system('xrdcp -f %s/%s root://eosatlas.cern.ch/%s/%s' % (tmpdir,outname,path,outname)) >> 8
427  if status:
428  # Continue to try other files if one failed to upload to EOS
429  print ('\nERROR: Unable to copy file to EOS at %s/%s' % (path,outname))
430  continue
431 
432  os.system('rm %s/%s' % (tmpdir,outname))
433  status = os.system('echo "`date` %s/%s" >> %s/backup.log' % (path,outname,d)) >> 8
434  if status:
435  sys.exit('\nERROR: Could not update backup log file')
436 
437  if options.archive:
438  print ('\nWARNING: DELETION OF SOURCE FILES NOT YET IMPLEMENTED\n')
439  sys.exit(0)
440 
441 
442 #
443 # List backup directory on EOS
444 #
445 if cmd == 'lsbackup' and not cmdargs:
446  path = archivepath if options.archive else backuppath
447  print ('Backup directory:', path)
448  for f in DiskUtils.from_directory(path):
449  print (f)
450  sys.exit(0)
451 
452 
453 #
454 # Show available data sets
455 #
456 if cmd == 'show' and len(cmdargs)==1:
457  run = cmdargs[0]
458 
459  print ('Base path: ', options.eospath)
460  print ('Project tag: ', options.project)
461  print ('Stream: ', options.stream)
462  print()
463  print ('Files available (filtered by: {}):'.format(options.filter))
464  print ('---------------')
465 
466  fs = DiskUtils.FileSet.from_ds_info(run,
467  project=options.project,
468  stream=options.stream,
469  base=options.eospath)
470  for f in fs.matching(options.filter):
471  print (f)
472 
473  print()
474  print ('Beam spot tasks:')
475  print ('---------------')
476  with getTaskManager() as taskman:
477  for t in taskman.taskIterDict(qual=["where DSNAME like '%%%s%%' order by UPDATED" % run]):
478  print ('%-45s %-45s %4s job(s), last update %s' % (t['DSNAME'],t['TASKNAME'],t['NJOBS'],time.ctime(t['UPDATED'])))
479  sys.exit(0)
480 
481 
482 #
483 # Postprocessing: for all tasks requiring it, or for a selected set of tasks
484 #
485 if cmd == 'postproc' and not cmdargs:
486  with getTaskManager() as taskman:
487  for t in taskman.taskIterDict(
488  qual=['where STATUS > %i and STATUS <= %i order by UPDATED' % (
489  TaskManager.StatusCodes['RUNNING'],
490  TaskManager.StatusCodes['POSTPROCESSING'])]):
491  doPostProcessing(taskman, t, t['TASKPOSTPROCSTEPS'].split(), BeamSpotPostProcessing)
492  sys.exit(0)
493 
494 if cmd == 'postproc' and len(cmdargs) in [2,3]:
495  dsname = cmdargs[0]
496  taskname = cmdargs[1]
497 
498  # for backwards compatibility these are equivalent:
499  # $0 postproc DSNAME TASKNAME POSTPROCSTEPS
500  # $0 -z POSTPROCSTEPS postproc DSNAME TASKNAME
501  steps = options.postprocsteps if len(cmdargs) < 3 else cmdargs[2].split(',')
502  if steps:
503  print ('Executing postprocessing tasks:', steps)
504  else:
505  print ('Executing postprocessing tasks as specified in task database')
506  print()
507 
508  with getTaskManager() as taskman:
509  try:
510  taskList = getFullTaskNames(taskman,
511  dsname,
512  taskname,
513  confirmWithUser=not options.batch,
514  addWildCards=not options.nowildcards)
515  except TaskManagerCheckError as e:
516  fail(e)
517  for taskName in taskList:
518  t = taskman.getTaskDict(taskName[0], taskName[1])
519  if steps:
520  doPostProcessing(taskman, t, steps, BeamSpotPostProcessing, forceRun=True)
521  else:
522  doPostProcessing(taskman, t, t['TASKPOSTPROCSTEPS'].split(), BeamSpotPostProcessing, forceRun=True)
523  sys.exit(0)
524 
525 
526 #
527 # Upload beam spot data for given task to COOL
528 #
529 if cmd=='upload' and len(args)==3:
530  with getTaskManager() as taskman:
531  try:
532  [(dsname,task)] = getFullTaskNames(taskman,args[1],args[2],requireSingleTask=True,confirmWithUser=not options.batch,addWildCards=not options.nowildcards)
533  except TaskManagerCheckError as e:
534  print (e)
535  sys.exit(1)
536  if not options.beamspottag:
537  sys.exit('ERROR: No beam spot tag specified')
538 
539  dbfile = glob.glob('%s/%s/*-beamspot.db' % (dsname,task))
540  if len(dbfile)!=1:
541  print ('ERROR: Missing or ambiguous input COOL file:',dbfile)
542  sys.exit(1)
543 
544  try:
545  with open(prodcoolpasswdfile, 'r') as passwdfile:
546  passwd = passwdfile.read().strip()
547  except:
548  sys.exit('ERROR: Unable to determine COOL upload password')
549 
550  try:
551  with open(flaskcoolpasswdfile, 'r') as flaskpasswdfile:
552  flaskpasswd = flaskpasswdfile.read().strip()
553  except:
554  fail('Unable to determine FLASK upload password')
555 
556  print ('\nData set: ',dsname)
557  print ('Beam spot file: ',dbfile[0])
558  print ('Uploading to tag: ',options.beamspottag)
559  os.system('dumpBeamSpot.py -d %s -t %s %s' % (options.srcdbname,options.srctag,dbfile[0]))
560 
561  if options.ignoremode:
562  ignoremode = '--passopt="--appendlocked --ignoremode %s"' % options.ignoremode
563  else:
564  ignoremode = ''
565  if options.batch:
566  batchmode = '--batch'
567  else:
568  batchmode = ''
569 
570  print('command: /afs/cern.ch/user/a/atlcond/utilsproxy/AtlCoolMerge.py --flask --nobackup --client_id cool-flask-beamspot-client --client_secret <flaskpassword> --nomail %s %s --folder /Indet/Beampos --tag %s --retag %s --destdb %s %s %s ATONR_COOLOFL_GPN ATLAS_COOLOFL_INDET_W <password>' % (batchmode,ignoremode,options.srctag,options.beamspottag,options.destdbname,dbfile[0],options.srcdbname))
571  stat = os.system('/afs/cern.ch/user/a/atlcond/utilsproxy/AtlCoolMerge.py --flask --nobackup --client_id cool-flask-beamspot-client --client_secret %s --nomail %s %s --folder /Indet/Beampos --tag %s --retag %s --destdb %s %s %s ATONR_COOLOFL_GPN ATLAS_COOLOFL_INDET_W %s' % (flaskpasswd,batchmode,ignoremode,options.srctag,options.beamspottag,options.destdbname,dbfile[0],options.srcdbname,passwd))
572 
573  if stat:
574  print ("\n\nERROR: UPLOADING TO COOL FAILED - PLEASE CHECK CAREFULLY!\n\n")
575  sys.exit(1)
576 
577  # Record upload information, both in task DB (field COOLTAGS) and in upload flag file
578  uploadflag = dbfile[0]+'.uploaded'
579  stat = os.system('echo "`date` %s" >> %s' % (options.beamspottag,uploadflag) )
580  if stat:
581  print ("ERROR: Uploading was successful, but unable to set upload flag", uploadflag )
582 
583  cooltags = options.beamspottag
584 
585  # If uploading to the beamspot tag linked to the Current BLK alias, and the Next BLK alias also exists (not '')
586  # and points to a different tag, then AtlCoolMerge will upload to the Next beamspot tag too. Hence record
587  # that in COOLTAGS too. Mimics the logic in AtlCoolMergeLib.py
588 
589  nextbeamspot = ''
590  try:
591  nextbeamspot = COOLUtils.resolveNextBeamSpotFolder()
592  except:
593  nextbeamspot = ''
594  if nextbeamspot == options.beamspottag:
595  nextbeamspot = ''
596  if not ('UPD' in options.beamspottag and 'UPD' in nextbeamspot):
597  nextbeamspot = ''
598  if nextbeamspot != '':
599  print ('Additionally uploading to Next tag: ',nextbeamspot)
600  cooltags = appendUnique(cooltags, nextbeamspot)
601 
602  # Upload tag(s) to TaskManager
603  t = taskman.getTaskDict(dsname,task)
604  taskman.setValue(dsname,task,'COOLTAGS',appendUnique(t['COOLTAGS'],cooltags))
605 
606  sys.exit(0)
607 
608 
609 #
610 # Retrieve task data from grid job
611 #
612 if cmd=='dq2get' and len(args)==3:
613  try:
614  with getTaskManager() as taskman:
615  [(dsname,task)] = getFullTaskNames(taskman,args[1],args[2],requireSingleTask=True,addWildCards=not options.nowildcards)
616  except TaskManagerCheckError as e:
617  print (e)
618  sys.exit(1)
619  dir = os.path.join(dsname, task)
620  griddsname = '%s.%s-%s' % (options.griduser,dsname,task)
621  path = os.path.join(dir, griddsname)
622  if os.path.exists(path):
623  print ('ERROR: Path exists already:',path)
624  sys.exit(1)
625  print ('Extracting into %s/%s ...' % (dir,griddsname))
626  stat = os.system('cd %s; dq2-get -T 6,6 %s' % (dir,griddsname))
627  if stat:
628  print ("ERROR: Problem occurred while extracting data set - task status not changed")
629 
630  statfile = glob.glob('%s/000/*.status.SUBMITTED' % (dir))
631  if len(statfile)!=1:
632  print ("ERROR: Unable to uniquely identfying status - giving up. Candidate status files are:")
633  print (statfile)
634  sys.exit(1)
635  os.system('rm %s' % statfile[0])
636  basename = statfile[0][:-17]
637  os.system('touch %s.exit.0' % basename)
638  os.system('touch %s.COMPLETED' % basename)
639  print ("\nSuccessfully downloaded data set",griddsname)
640  os.system('du -hs %s' % path)
641  print()
642  sys.exit(0)
643 
644 
645 #
646 # Query T0 task status
647 #
648 if cmd=='queryT0' and len(args)==3:
649  try:
650  with getTaskManager() as taskman:
651  [(dsname,task)] = getFullTaskNames(taskman,args[1],args[2],requireSingleTask=True,addWildCards=not options.nowildcards)
652  except TaskManagerCheckError as e:
653  print (e)
654  sys.exit(1)
655  tags = task.split('.')[-1]
656  # FIXME: change the following to automatically determine from the input files of the DB_BEAMSPOT job?
657  if 'ESD' in options.filter:
658  t0TaskName = '%s.recon.ESD.%s.beamspotproc.task' % (dsname,tags)
659  else:
660  if any(t[0] == 'm' for t in tags.split('_')):
661  t0TaskName = '%s.merge.AOD.%s.beamspotproc.task' % (dsname,tags)
662  else:
663  t0TaskName = '%s.recon.AOD.%s.beamspotproc.task' % (dsname,tags)
664 
665  print ('Querying Tier-0 database for task',t0TaskName,'...')
666  oracle = getT0DbConnection()
667  cur = oracle.cursor()
668  try:
669  #sql = str("SELECT status FROM etask WHERE taskname='%s' AND creationtime>sysdate-10" % t0TaskName) # Oracle doesn't want unicode
670  sql = str("SELECT status FROM tasks WHERE taskname='%s' AND tasktype='beamspotproc'" % t0TaskName) # Oracle doesn't want unicode
671  cur.execute(sql)
672  r = cur.fetchall()
673  except Exception as e:
674  print (e)
675  sys.exit('ERROR: Unable to retrieve status of task %s' % t0TaskName)
676  if len(r)==0:
677  sys.exit('ERROR: No such task found: %s' % t0TaskName)
678  if len(r)>1:
679  sys.exit('ERROR: %i tasks found - unable to determine task status' % len(r))
680  status = r[0][0]
681  print ('\nTask status = %s\n' % status)
682  if status=='FINISHED' or status=='TRUNCATED':
683  sys.exit(0)
684  else:
685  sys.exit(2)
686 
687 
688 #
689 # Run command over set of matching tasks
690 #
691 if cmd=='runCmd' and len(args)==4:
692  dssel = args[1]
693  tasksel = args[2]
694  cmd = args[3]
695  if options.nowildcards:
696  qual = ["where DSNAME like '%s' and TASKNAME like '%s'" % (dssel,tasksel)]
697  else:
698  qual = ["where DSNAME like '%%%s%%' and TASKNAME like '%%%s%%'" % (dssel,tasksel)]
699  if options.beamspottag:
700  qual.append("and COOLTAGS like '%%%s%%'" % options.beamspottag)
701  #qual.append("order by RUNNR desc")
702  qual.append("order by RUNNR")
703  print ('Running command\n')
704  print (' ',cmd % ({'DSNAME': 'DSNAME', 'TASKNAME': 'TASKNAME', 'RUNNR': 'RUNNR', 'FULLDSNAME': 'FULLDSNAME'}))
705  print ('\nover the following datasets / tasks:\n')
706  print (' %-10s %-40s %s' % ('Run','Dataset','Task'))
707  print (' %s' % (74*'-'))
708 
709  taskList = []
710  with getTaskManager() as taskman:
711  for t in taskman.taskIterDict('*',qual):
712  dsname = t['DSNAME']
713 
714  # Deal with express vs minBias in 900 GeV runs
715  if options.nominbias and 'physics_MinBias' in dsname:
716  print ('Warning: changing physics_MinBias in dsname to express_express')
717  dsname = dsname.replace('physics_MinBias','express_express')
718  t['DSNAME'] = dsname
719 
720  runnr = t['RUNNR']
721  taskname = t['TASKNAME']
722  print (' %-10s %-40s %s'% (runnr,dsname,taskname))
723 
724  if options.excludeiftask:
725  if options.nowildcards:
726  # n = taskman.getNTasks(['where DSNAME =',DbParam(dsname),'and TASKNAME like',DbParam(options.excludeiftask)])
727  n = taskman.getNTasks(["where DSNAME ='%s' and TASKNAME like '%s'" % (dsname,options.excludeiftask)])
728  else:
729  n = taskman.getNTasks(["where DSNAME ='%s' and TASKNAME like '%%%s%%'" % (dsname,options.excludeiftask)])
730  if n!=0:
731  print (' ==> SKIPPING - %i matching tasks found' % n)
732  continue
733 
734  if options.excludeds:
735  excludeList = options.excludeds.split(',')
736  # use any() so the 'continue' applies to the task loop (a 'continue' in a nested loop would not skip the dataset)
737  if any(s in dsname for s in excludeList):
738  print (' ==> SKIPPING dataset')
739  continue
740 
741  if options.dslist:
742  stat, out = subprocess.getstatusoutput('grep -c %s %s' % (dsname, options.dslist))
743  if stat==2: # Missing file etc
744  print (out)
745  sys.exit(1)
746 
747  try:
748  if int(out)==0:
749  print (' ==> SKIPPING - dataset %s not found in %s' % (dsname, options.dslist))
750  continue
751  except ValueError: # Some other error
752  print (out)
753  sys.exit(1)
754 
755  del stat, out
756  # Find full dataset name from file
757  stat, out = subprocess.getstatusoutput('grep %s %s' % (dsname, options.dslist))
758  if stat==2: # Missing file etc
759  print (out)
760  sys.exit(1)
761 
762  t['FULLDSNAME'] = out.strip()
763 
764  taskList.append(t)
765 
766  if not taskList:
767  print ('\nNo jobs need to be run.\n')
768  sys.exit(0)
769 
770  print ('\n%i datasets / tasks selected.\n' % len(taskList))
771  if not options.batch:
772  a = input('\nARE YOU SURE [n] ? ')
773  if a!='y':
774  sys.exit('ERROR: Running of monitoring tasks aborted by user')
775  print()
776  for t in taskList:
777  fullcmd = cmd % t
778  print ('\nExecuting: ',fullcmd,' ...')
779  sys.stdout.flush()
780  os.system(fullcmd)
781 
782  print()
783  sys.exit(0)
784 
785 
786 #
787 # Run monitoring jobs for jobs completed at most MAXTIME ago
788 #
789 if cmd=='runMonJobs' and len(args)<3:
790  if len(args)==2:
791  earliestUpdateTime = time.time()-float(args[1])
792  else:
793  earliestUpdateTime = 0
794  if not options.beamspottag:
795  sys.exit('ERROR: No beam spot tag specified')
796 
797  print ('Running the following monitoring tasks for tasks of type %s:\n' % options.runtaskname)
798  print (' %-10s %s' % ('RUN','MONITORING TASK'))
799  print (' %s' % (60*'-'))
800 
801  onDiskCode = TaskManager.OnDiskCodes['ALLONDISK']
802  taskList = []
803  with getTaskManager() as taskman:
804  for t in taskman.taskIterDict('*',["where TASKNAME like '%s%%'" % options.runtaskname,
805  "and UPDATED >= ", DbParam(earliestUpdateTime),
806  "and ONDISK = ", DbParam(onDiskCode),
807  "and COOLTAGS like '%%%s%%'" % options.beamspottag,
808  "order by RUNNR desc"]):
809  dsname = t['DSNAME']
810  runnr = t['RUNNR']
811  taskName = t['TASKNAME']
812  datatag = taskName.split('.')[-1].split('_')[0]
813  monTaskName = 'MON.%s.%s' % (taskName,datatag)
814  useRun = True
815  if options.useRun is not None:
816  useRun = False
817  if runnr == options.useRun:
818  useRun = True
819 
820  if useRun:
821  try:
822  m = next(taskman.taskIterDict('*',["where RUNNR =",DbParam(runnr),"and DSNAME =",DbParam(dsname),"and TASKNAME =",DbParam(monTaskName),"order by UPDATED desc"]))
823  print (' %-10s %s %s'% (runnr,dsname,monTaskName))
824  except:
825  print (' * %-10s %s %s'% (runnr,dsname,'--- no monitoring task found ---'))
826  taskList.append(t)
827  pass
828 
829  if not taskList:
830  print ('\nNo jobs need to be run.\n')
831  sys.exit(0)
832 
833  if not options.batch:
834  a = input('\nARE YOU SURE [n] ? ')
835  if a!='y':
836  sys.exit('ERROR: Running of monitoring tasks aborted by user')
837  print()
838 
839  oracle = getT0DbConnection()
840  for t in taskList:
841  dsname = t['DSNAME']
842  runnr = t['RUNNR']
843  ptag = dsname.split('.')[0]
844  stream = dsname.split('.')[2]
845  taskName = t['TASKNAME']
846  fulldatatag = taskName.split('.')[-1]
847  datatag = fulldatatag.split('_')[0]
848  monTaskName = 'MON.%s' % (taskName)
849 
850  # Now that beamspot folder tag can change, the tag to be monitored must be the one the parent task uploaded to.
851  # If more than one tag (might be case if upload to current/next) then take first (should be same)
852  cooltags = t['COOLTAGS']
853  if not cooltags: cooltags = ''
854  bstag = cooltags.split()[0]
855 
856  filter = 'AOD'
857  if any(t[0] == 'm' for t in fulldatatag.split('_')):
858  t0dsname = '%s.merge.AOD.%s%%' % (dsname, datatag)
859  else:
860  t0dsname = '%s.recon.AOD.%s%%' % (dsname, datatag)
861 
862  c = getJobConfig('.',dsname,taskName)
863  if 'ESD' in c['inputfiles'][0]:
864  filter = 'ESD'
865  t0dsname = '%s.recon.ESD.%s' % (dsname, datatag)
866 
867  print ('\nRunning monitoring job for run %s:' % runnr)
868 
869  submitjob=True
870  eospath=options.eospath
871  # for old runs we would need to check if the dataset had been replicated at Tier-0.
872  # with EOS this is no longer necessary.
873  r=[]
874  if int(runnr)<240000:
875  print ('... Querying T0 database for replication of %s' % t0dsname)
876  cur = oracle.cursor()
877  cur.execute("select DATASETNAME,PSTATES from DATASET where DATASETNAME like '%s' and PSTATES like '%%replicate:done%%'" % t0dsname)
878  r = cur.fetchall()
879  if not r:
880  print (' WARNING: input data not yet replicated - please retry later')
881  submitjob=False
882  else:
883  print ('... Querying T0 database for completion of merging jobs of %s' % t0dsname)
884  cur = oracle.cursor()
885  origt0TaskName='%s.recon.AOD.%s%%.aodmerge.task' % (dsname,datatag)
886  cur.execute("select status from tasks where taskname like '%s' and tasktype='aodmerge'" % origt0TaskName)
887  r = cur.fetchall()
888  if not r:
889  print (' WARNING: can\'t get status of merge job for %s, running on un-merged samples instead' % origt0TaskName)
890  eospath='/eos/atlas/atlastier0/tzero/prod'
891  elif not (r[0][0]=='FINISHED' or r[0][0]=='TRUNCATED'):
892  print (' Merge job for taskname %s is not finished yet, has status %s, running on un-merged samples instead.' % (origt0TaskName, r[0][0]))
893  eospath='/eos/atlas/atlastier0/tzero/prod'
894  else:
895  print (' Merge job is finished, launching jobs.')
896  submitjob=True
897 
898  if submitjob:
899  if int(runnr)<240000:
900  print (' ',r)
901  print ('... Submitting monitoring task')
902  queue = options.batch_queue or '\'\"tomorrow\"\''
903  paramValues = ''
904  if options.params:
905  paramValues = '--params \''+options.params+'\''
906  testFlag = ''
907  if options.testonly:
908  testFlag = '--test'
909 
910  cmd = 'beamspotman --eospath=%s -p %s -s %s -f \'.*\\.%s\\..*\' -t %s --queue %s %s %s --montaskname %s runMon %i %s' % (eospath,ptag,stream,filter,bstag,queue,paramValues,testFlag,monTaskName,int(runnr),datatag)
911  print (cmd)
912  sys.stdout.flush()
913  status = os.system(cmd) >> 8 # Convert to standard Unix exit code
914  if status:
915  print ('\nERROR: Job submission returned error - exit code %i\n' % status)
916 
917  print()
918  sys.exit(0)
919 
920 
921 #
922 # Archive task to EOS
923 #
924 if cmd=='archive' and len(args)==3:
925  if not options.batch:
926  print ('\nWARNING: If you confirm below, each of the following datasets will:')
927  print (' - be archived to EOS')
928  if options.resultsondisk:
929  print (' - be marked as RESULTSONDISK in the task database')
930  print (' - have everything except the results files *** DELETED ***')
931  else:
932  print (' - be marked as ARCHIVED in the task database')
933  print (' - have all of its files *** DELETED ***')
934  print()
935 
936  with getTaskManager() as taskman:
937  try:
938  taskList = getFullTaskNames(taskman,args[1],args[2],confirmWithUser=not options.batch,addWildCards=not options.nowildcards)
939  except TaskManagerCheckError as e:
940  print (e)
941  sys.exit(1)
942 
943  tmpdir = '/tmp'
944  path = archivepath
945  onDiskCode = TaskManager.OnDiskCodes.get('ALLONDISK',None)
946  archivedCode = TaskManager.OnDiskCodes.get('RESULTSONDISK',None) if options.resultsondisk else TaskManager.OnDiskCodes.get('ARCHIVED',None)
947  exceptList = ['*dqflags.txt', '*.gif', '*.pdf', '*.config.py*', '*.argdict.gpickle', '*.AveBeamSpot.log', '*.PlotBeamSpotCompareReproc.log', '*.sh', '*.BeamSpotNt*', '*.BeamSpotGlobalNt.log', '*.status.*', '*.exit.*']
948 
949  for (dsname,taskname) in taskList:
950  t = taskman.getTaskDict(dsname,taskname)
951 
952  # Check that task files are still on disk, so we're not overwriting an earlier archive
953  if t['ONDISK'] != onDiskCode:
954  print ('Skipping task %s / %s status %s (task files must be on disk)' % (dsname,taskname,getKey(TaskManager.OnDiskCodes,t['ONDISK'])))
955  print()
956  continue
957 
958  dir = '%s/%s' % (dsname,taskname)
959  outname = dir.replace('/','-')+time.strftime('-%G_%m_%d.tar.gz')
960  print ('Archiving task %s / %s ...' % (dsname,taskname))
961  print (' --> %s/%s ...' % (path,outname))
962 
963  # Paranoia check against later catastrophic delete
964  if dir=='.' or dir=='*':
965  print ('\n**** FATAL ERROR: Very dangerous value of task directory found: %s - ABORTING' % dir)
966  sys.exit(1)
967 
968  # If expected directory exists, tar up files, write to EOS, mark as archived, and delete
969  if os.path.exists(dir):
970  status = os.system('tar czf %s/%s %s' % (tmpdir,outname,dir)) >> 8
971  if status:
972  sys.exit('\n**** ERROR: Unable to create local tar file %s/%s' % (tmpdir,outname))
973  status = os.system('xrdcp %s/%s root://eosatlas.cern.ch/%s/%s' % (tmpdir,outname,path,outname)) >> 8
974  if status:
975  os.system('rm -rf %s/%s' % (path,outname) )
976  status = os.system('xrdcp -f %s/%s root://eosatlas.cern.ch/%s/%s' % (tmpdir,outname,path,outname)) >> 8
977  if status:
978  sys.exit('\n**** ERROR: Unable to copy file to EOS at %s/%s' % (path,outname))
979 
980  os.system('rm %s/%s' % (tmpdir,outname))
981  n = taskman.setValue(dsname,taskname,'ONDISK',archivedCode)
982  if n!=1:
983  sys.exit('\n**** ERROR: Unexpected number of tasks modified: %i instead of 1 (DSNAME=%s,TASKNAME=%s)' % (n,dsname,taskname))
984 
985  if options.resultsondisk:
986  oscmd = r"find %s ! \( -name '%s' \) -type f -exec rm {} \;" % (dir, "' -or -name '".join(exceptList))
987  os.system(oscmd)
988  else:
989  os.system('rm -rf %s' % dir)
990  else:
991  print ('\n**** ERROR: No task directory',dir,'\n')
992 
993  print()
994  sys.exit(0)
995 
996 #
997 # Run beam spot resubmit failed jobs
998 # Double check directory structure is appropriate for you
999 #
1000 
1001 if cmd=='resubmit' and len(args) in [3,4]:
1002 
1003  dsname = args[1]
1004  taskname = args[2]
1005 
1006  # for backwards compatibility these are equivalent:
1007  # $0 resubmit DSNAME TASKNAME QUEUE
1008  # $0 -q QUEUE resubmit DSNAME TASKNAME
1009  queue = args[3] if len(args) == 4 else options.batch_queue
1010  if not queue:
1011  print ('ERROR: No queue was specified (use -q)')
1012  sys.exit(1)
1013 
1014  basepath = os.path.join(os.getcwd(), dsname, taskname)
1015  dircontents = os.listdir(basepath)
1016 
1017  condorScriptTemplate="""executable = %(scriptfile)s
1018 arguments = $(ClusterID) $(ProcId)
1019 output = %(logfile)s.out
1020 error = %(logfile)s.err
1021 log = %(logfile)s.log
1022 universe = vanilla
1023 +JobFlavour = %(batchqueue)s
1024 queue
1025 """
1026 
1027 
1028  for dir in dircontents:
1029  if not os.path.isdir(os.path.join(basepath, dir)):
1030  continue
1031  print (dir)
1032  jobname = dir
1033  if (options.montaskname in taskname.split('.')) or options.legacy_mon:
1034  jobname = '-'.join([dsname, taskname, 'lb' + dir])
1035  fullpath = os.path.join(basepath, dir)
1036 
1037  isRunning = False
1038  isFailed = False
1039  for f in os.listdir(fullpath):
1040  if re.search('RUNNING', f):
1041  isRunning = True
1042  if re.search('COMPLETED',f) or re.search('POSTPROCESSING',f):
1043  with open(os.path.join(fullpath, jobname + '.exitstatus.dat')) as statusFile:
1044  status = statusFile.read(1)
1045  print (status)
1046  if status != "0":
1047  isFailed = True
1048  isRunning = False
1049  if options.resubRunning and isRunning:
1050  print ("Will resubmit running job")
1051  elif (isRunning or not isFailed) and not options.resubAll:
1052  continue
1053 
1054 
1055  for f in os.listdir(fullpath):
1056  if re.search('.exitstatus.', f):
1057  os.remove(os.path.join(fullpath, f))
1058  elif re.search('.exit.', f):
1059  os.remove(os.path.join(fullpath, f))
1060  elif re.search('.status.', f):
1061  os.remove(os.path.join(fullpath, f))
1062  elif re.search('.log', f):
1063  os.remove(os.path.join(fullpath, f))
1064  elif re.search('.py.final.py', f):
1065  os.remove(os.path.join(fullpath, f))
1066 
1067  jobConfig = {
1068  'test': 'this',
1069  'batchqueue' : queue,
1070  'jobname' : jobname,
1071  'jobdir' : fullpath,
1072  }
1073  jobConfig['logfile'] = '%(jobdir)s/%(jobname)s.log' % jobConfig
1074  jobConfig['scriptfile'] = '%(jobdir)s/%(jobname)s.sh' % jobConfig
1075 
1076  condorScript = condorScriptTemplate % jobConfig
1077  print (condorScript)
1078  script = open('condorSubmit.sub','w')
1079  script.write(condorScript)
1080  script.close()
1081  os.chmod('condorSubmit.sub',0o755)
1082  #batchCmd = 'bsub -L /bin/bash -q %(batchqueue)s -J %(jobname)s -o %(logfile)s %(scriptfile)s' % jobConfig
1083  batchCmd = 'condor_submit condorSubmit.sub'
1084 
1085  print (batchCmd)
1086  os.system(batchCmd)
1087 
1088  with getTaskManager() as taskman:
1089  print (taskman.getStatus(dsname, taskname))
1090  taskman.setStatus(dsname, taskname, TaskManager.StatusCodes['RUNNING'] )
1091 
1092  sys.exit(0)
1093 
1094 
1095 #
1096 # Run beam spot reprocessing jobs
1097 # Defines one task with several jobs where the splitting into groups of 5 LBs is done by hand
1098 #
1099 if cmd=='reproc' and len(args)==5:
1100  from InDetBeamSpotExample import HTCondorJobRunner
1101 
1102  jobopts = args[1]
1103  dsname = args[2]
1104  taskname = args[3]
1105  inputdata = args[4]
1106 
1107  lbperjob = options.lbperjob
1108  params = {'LumiRange': lbperjob}
1109  cmd = ' '.join(sys.argv)
1110  files = []
1111 
1112  # Additional job parameters
1113  for s in options.params.split(', '):
1114  if s:
1115  try:
1116  p = s.split('=',1)
1117  params[p[0].strip()] = eval(p[1].strip())
1118  except:
1119  print ('\nERROR parsing user parameter',p,'- parameter will be ignored')
1120 
1121  # TODO: Is this still needed?
1122  # if options.rucio:
1123  # # Has extra subdirectories: find unique set of these from filepaths, ignoring final part (i.e. filename).
1124  # # Will also work for single directory but more time consuming.
1125  # #inputdirs = set(['/'+'/'.join(f.split('/')[4:-1])+'/' for f in files])
1126  # inputdirs = set(['/'.join(f.replace(protocol, '').split('/')[:-1])+'/' for f in files])
1127 
1128  # Make list of all files (either from explicit list or by 'globbing' dir)
1129  lbMap = {}
1130  backend = DiskUtils.EOS() if options.eos else None
1131  fs = DiskUtils.FileSet.from_input(inputdata, backend=backend)
1132  fs = fs.matching(options.filter).use_files_from(options.filelist).only_latest(options.removedups)
1133  for f, lbs in fs.with_lumi_blocks(options.lbfilemap):
1134  lbMap[f] = lbs
1135  files.append(f)
1136  if not files: fail('No files were found.')
1137 
1138  # Form bunched jobs
1139  jobFileDict = {}
1140  jobLBDict = {}
1141 
1142  # Special case to submit a single job over all files
1143  if lbperjob == -1:
1144  jobId = 1
1145  jobFileDict[jobId] = files
1146  jobLBDict[jobId] = []
1147 
1148  for f in files:
1149  try:
1150  lbs = sorted(lbMap[f.split('/')[-1]])
1151  except KeyError:
1152  sys.exit('No mapping for file %s' % f.split('/')[-1])
1153 
1154  jobLBDict[jobId].extend(lbs)
1155  else:
1156  for f in files:
1157  try:
1158  lbs = sorted(lbMap[f.split('/')[-1]])
1159  except KeyError:
1160  sys.exit('No mapping for file %s' % f.split('/')[-1])
1161 
1162  for lbnr in lbs:
1163  jobId = int((lbnr-1)/lbperjob)
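 # e.g. with lbperjob=5: LBs 1-5 go to jobId 0, LBs 6-10 to jobId 1, etc.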
1164 
1165  if not jobId in jobFileDict:
1166  jobFileDict[jobId] = [f]
1167  jobLBDict[jobId] = [lbnr]
1168  else:
1169  if not f in jobFileDict[jobId]:
1170  jobFileDict[jobId].append(f)
1171  jobLBDict[jobId].append(lbnr)
1172 
1173  # Submit bunched jobs
1174  with getTaskManager() as taskman:
1175  for i in sorted(jobFileDict.keys()):
1176  jobnr = i*lbperjob+1 # use first LB number as job number
1177  files=jobFileDict[i]
1178  lbs = sorted(set(jobLBDict[i]))
1179 
1180  #params['lbList'] = '[' + ','.join([str(l) for l in lbs]) + ']'
1181  intlbs = []
1182  for lbnr in lbs:
1183  intlbs.append(int(lbnr))
1184  params['lbList'] = intlbs
1185  jobname=dsname+'-'+taskname+'-lb%03i' % jobnr
1186 
1187  queue = options.batch_queue or '"tomorrow"'
1188  runner = HTCondorJobRunner.HTCondorJobRunner(
1189  jobnr=jobnr,
1190  jobdir=os.path.join(os.getcwd(), dsname, taskname, jobname),
1191  jobname=jobname,
1192  inputds='',
1193  inputfiles=files,
1194  joboptionpath=jobopts,
1195  filesperjob=len(files),
1196  batchqueue=queue,
1197  addinputtopoolcatalog=True,
1198  taskpostprocsteps='ReprocVertexDefaultProcessing',
1199  #outputfilelist=['dpd.root', 'nt.root', 'monitoring,root', 'beamspot.db'],
1200  autoconfparams='DetDescrVersion',
1201  returnstatuscode=True,
1202  comment=cmd,
1203  **params)
1204 
1205  #runner.showParams()
1206  try:
1207  runner.configure()
1208  except Exception as e:
1209  print ("ERROR: Unable to configure JobRunner job - perhaps same job was already configured / run before?")
1210  print ("DEBUG: Exception =",e)
1211  else:
1212  taskman.addTask(dsname, taskname, jobopts, runner.getParam('release'), runner.getNJobs(), runner.getParam('taskpostprocsteps'), comment=cmd)
1213  runner.run()
1214 
1215  sys.exit(0)
1216 
1217 
1218 #
1219 # Run task over AOD, bunching input files to jobs according to meta-data in input
1220 # AOD files.
1221 #
1222 
1223 if cmd=='runaod' and len(args)==5:
1224 
1225  from InDetBeamSpotExample import HTCondorJobRunner
1226  jobopts = args[1]
1227  dsname = args[2]
1228  taskname = args[3]
1229  inputdata = args[4]
1230 
1231  lbperjob = options.lbperjob
1232  params = {'LumiRange': lbperjob}
1233 
1234  # Always fit a single pLb for scans
1235  if options.pseudoLbFile:
1236  params = {'LumiRange': 1}
1237 
1238  cmd = ' '.join(sys.argv)
1239  files = []
1240 
1241  # Additional job parameters
1242  for s in options.params.split(', '):
1243  if s:
1244  try:
1245  p = s.split('=',1)
1246  params[p[0].strip()] = eval(p[1].strip())
1247  except:
1248  print ('\nERROR parsing user parameter',p,'- parameter will be ignored')
1249 
1250  lbMap = {}
1251  backend = DiskUtils.EOS() if options.eos else None
1252  fs = DiskUtils.FileSet.from_input(inputdata, backend=backend)
1253  print ("****************************************************")
1254  print ("*************** printing files *********************")
1255  print (fs)
1256  fs = fs.matching(options.filter)
1257  for f, lbs in fs.with_lumi_blocks(options.lbfilemap):
1258  print (f, lbs)
1259  lbMap[f] = lbs
1260  files.append(f)
1261  if not files: fail('No files were found.')
1262 
1263  # Form bunched jobs
1264  jobFileDict = {}
1265  jobLBDict = {}
1266  jobParams = {}
1267 
1268  if options.pseudoLbFile:
1269  # Extract start and end times of real LBs
1270  from InDetBeamSpotExample.COOLUtils import COOLQuery
1271  coolQuery = COOLQuery()
1272  from InDetBeamSpotExample.Utils import getRunFromName
1273  lbTimes = coolQuery.getLbTimes( getRunFromName(dsname, None, True) )
1274 
1275  # Loop over pseudo LBs
1276  with open(options.pseudoLbFile) as pLbFile:
1277  for line in pLbFile:
1278  if line[0] == '#': continue
1279 
1280  tokens = line.split()
1281  plbnr,tstart,tend = int(tokens[0]),int(tokens[1]),int(tokens[2])
1282  jobId = int(plbnr/lbperjob)
1283 
1284  # if we're passing in a file with acqFlags, and we're checking those flags,
1285  # then skip any points that don't have acqFlag=1.0
1286  if not options.noCheckAcqFlag and len(tokens)>=5 and abs(float(tokens[4])-1.)>0.001:
1287  print ("Point is not stationary -- skipping job %d" % jobId)
1288  continue
1289 
1290  # Find real LBs covering time period of pseudo LB. Assumes pLBs in nsec
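 # Overlap test: intervals [a,b] and [c,d] overlap iff (a-d)*(b-c) < 0; here (a,b) is the
 # real LB's start/end time and (c,d) is the pseudo-LB span converted from ns to s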
1291  rlbs = [lb for (lb,time) in lbTimes.items() if (time[0] - tend/1e9)*(time[1] - tstart/1e9) < 0]
1292 
1293  # Find files containing those real lbs
1294  filenames = []
1295  for f in files:
1296  try:
1297  lbs = sorted(lbMap[f])
1298  except KeyError:
1299  sys.exit('No mapping for file %s' % f.split('/')[-1])
1300 
1301  if not sum([lb for lb in lbs if lb in rlbs]): continue
1302 
1303  filenames.append(f)
1304  try:
1305  jobLBDict[jobId].extend([lb for lb in rlbs if not lb in jobLBDict[jobId]])
1306  jobFileDict[jobId].extend([f for f in filenames if not f in jobFileDict[jobId]])
1307  jobParams[jobId]['lbData'].append(line.strip('\n').strip())
1308  except KeyError:
1309  jobLBDict[jobId] = rlbs
1310  jobFileDict[jobId] = filenames
1311  jobParams[jobId] = {'lbData' : [line.strip('\n').strip()]}
1312 
1313  else:
1314  for f in files:
1315  try:
1316  lbs = sorted(lbMap[f])
1317  except KeyError:
1318  print ('WARNING: No mapping for file %s. Skipping' % f.split('/')[-1])
1319  continue
1320  #sys.exit('No mapping for file %s' % f.split('/')[-1])
1321 
1322  for lbnr in lbs:
1323  jobId = int((lbnr-1)/lbperjob)
1324 
1325  if not jobId in jobFileDict:
1326  jobFileDict[jobId] = [f]
1327  jobLBDict[jobId] = [lbnr]
1328  else:
1329  if not f in jobFileDict[jobId]:
1330  jobFileDict[jobId].append(f)
1331  jobLBDict[jobId].append(lbnr)
1332 
1333  # Submit bunched jobs
1334  with getTaskManager() as taskman:
1335  for i in sorted(jobFileDict.keys()):
1336  jobnr = i*lbperjob+1 # use first LB number as job number
1337  files=jobFileDict[i]
1338  lbs = sorted(set(jobLBDict[i]))
1339 
1340  intlbs = []
1341  for lbnr in lbs:
1342  intlbs.append(int(lbnr))
1343  params['lbList'] = intlbs
1344 
1345  # Allow job-by-job parameters
1346  try:
1347  p = jobParams[i]
1348  for k,v in p.items(): params[k] = v
1349  except KeyError:
1350  pass
1351 
1352  jobname=dsname+'-'+taskname+'-lb%03i' % jobnr
1353 
1354  queue = options.batch_queue
1355  if queue is None:
1356  # run on a different queue for VdM scans to avoid clogging up the normal queue
1357  queue='"tomorrow"' if options.pseudoLbFile else '"tomorrow"'
1358  runner = HTCondorJobRunner.HTCondorJobRunner(
1359  jobnr=jobnr,
1360  jobdir=os.path.join(os.getcwd(), dsname, taskname, jobname),
1361  jobname=jobname,
1362  inputds='',
1363  inputfiles=files,
1364  joboptionpath=jobopts,
1365  filesperjob=len(files),
1366  batchqueue=queue,
1367  addinputtopoolcatalog=True,
1368  taskpostprocsteps=' '.join(options.postprocsteps),
1369  autoconfparams='DetDescrVersion',
1370  returnstatuscode=True,
1371  comment=cmd,
1372  **params)
1373  if options.testonly:
1374  runner.showParams()
1375  else:
1376  try:
1377  runner.configure()
1378  except Exception as e:
1379  print ("ERROR: Unable to configure JobRunner job - perhaps same job was already configured / run before?")
1380  print ("DEBUG: Exception =",e)
1381  else:
1382  taskman.addTask(dsname, taskname, jobopts, runner.getParam('release'), runner.getNJobs(), runner.getParam('taskpostprocsteps'), comment=cmd)
1383  runner.run()
1384 
1385  sys.exit(0)
1386 
1387 
1388 
1389 #
1390 # Upload any DQ sqlite file to COOL (independent of task, w/o book keeping)
1391 #
1392 if cmd=='dqflag' and len(args)==2:
1393  dbfile = args[1]
1394  if not options.dqtag:
1395  sys.exit('ERROR: No beamspot DQ tag specified')
1396  try:
1397  with open(proddqcoolpasswdfile, 'r') as passwdfile:
1398  passwd = passwdfile.read().strip()
1399  except:
1400  sys.exit('ERROR: Unable to determine DQ COOL upload password')
1401 
1402  print ('\nBeam spot DQ file: ',dbfile)
1403  print ('Uploading to tag: ',options.dqtag)
1404 
1405  if options.ignoremode:
1406  ignoremode = '--ignoremode %s' % options.ignoremode
1407  else:
1408  ignoremode = ''
1409 
1410  if options.batch:
1411  batchmode = '--batch'
1412  else:
1413  batchmode = ''
1414 
1415  print()
1416  cmd = 'dq_defect_copy_defect_database.py --intag %s --outtag %s "sqlite://;schema=%s;dbname=%s" "oracle://ATLAS_COOLWRITE;schema=ATLAS_COOLOFL_GLOBAL;dbname=%s;"' %(options.srcdqtag, options.dqtag, dbfile, options.srcdqdbname, options.destdqdbname)
1417  print (cmd)
1418 
1419  stat = os.system(cmd)
1420 
1421  # Old DQ flags
1422  #stat = os.system('/afs/cern.ch/user/a/atlcond/utils22/AtlCoolMerge.py --nomail %s %s --folder /GLOBAL/DETSTATUS/SHIFTOFL --tag %s --retag %s --destdb %s %s %s ATLAS_COOLWRITE ATLAS_COOLOFL_GLOBAL_W %s' % (batchmode,ignoremode,options.srcdqtag,options.dqtag,options.destdqdbname,dbfile,options.srcdqdbname,passwd))
1423 
 1424  if status:
1425  print ("\n\nERROR: UPLOADING DQ FLAG TO COOL FAILED - PLEASE CHECK CAREFULLY!\n\n")
1426  sys.exit(1)
1427  sys.exit(0)
1428 
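# Example (hypothetical tags and file name) of the defect-copy command composed above:
#   dq_defect_copy_defect_database.py --intag HEAD --outtag MyBeamSpotDQTag \
#     "sqlite://;schema=mybeamspot-dqflags.db;dbname=CONDBR2" \
#     "oracle://ATLAS_COOLWRITE;schema=ATLAS_COOLOFL_GLOBAL;dbname=CONDBR2;"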
1429 #
1430 # Upload beam spot DQ flag for given task to COOL
1431 #
1432 if cmd=='dqflag' and len(args)==3:
1433  try:
1434  with getTaskManager() as taskman:
1435  [(dsname,task)] = getFullTaskNames(taskman,args[1],args[2],requireSingleTask=True,confirmWithUser=not options.batch,addWildCards=not options.nowildcards)
1436  except TaskManagerCheckError as e:
1437  print (e)
1438  sys.exit(1)
1439 
1440  if not options.dqtag:
1441  sys.exit('ERROR: No beam spot DQ tag specified')
1442 
1443  dbfile = glob.glob('%s/%s/*-dqflags.db' % (dsname,task))
1444  if len(dbfile)!=1:
1445  print ('ERROR: Missing or ambiguous input COOL DQ file:',dbfile)
 1446  sys.exit(1)
1447 
1448  try:
1449  with open(proddqcoolpasswdfile, 'r') as passwdfile:
1450  passwd = passwdfile.read().strip()
 1451  except Exception:
1452  sys.exit('ERROR: Unable to determine DQ COOL upload password')
1453 
1454  print ('\nData set: ',dsname)
1455  print ('Beam spot DQ file: ',dbfile[0])
1456  print ('Uploading to tag: ',options.dqtag)
1457 
 1458  # ignoremode/batchmode are kept for the commented-out legacy AtlCoolMerge upload below
 1459  ignoremode = ('--ignoremode %s' % options.ignoremode) if options.ignoremode else ''
 1460  batchmode = '--batch' if options.batch else ''
1466 
1467  cmd = 'dq_defect_copy_defect_database.py --intag %s --outtag %s "sqlite://;schema=%s;dbname=%s" "oracle://ATLAS_COOLWRITE;schema=ATLAS_COOLOFL_GLOBAL;dbname=%s;"' %(options.srcdqtag, options.dqtag, dbfile[0], options.srcdqdbname, options.destdqdbname)
1468  print ("Running %s" % cmd)
 1469  status = os.system(cmd)  # renamed from 'stat' to avoid shadowing the stat module imported above
1470 
1471  # Old DQ flags
1472  #stat = os.system('/afs/cern.ch/user/a/atlcond/utils22/AtlCoolMerge.py --nomail %s %s --folder /GLOBAL/DETSTATUS/SHIFTOFL --tag %s --retag %s --destdb %s %s %s ATLAS_COOLWRITE ATLAS_COOLOFL_GLOBAL_W %s' % (batchmode,ignoremode,options.srcdqtag,options.dqtag,options.destdqdbname,dbfile[0],options.srcdqdbname,passwd))
 1473  if status:
1474  print ("\n\nERROR: UPLOADING DQ FLAG TO COOL FAILED - PLEASE CHECK CAREFULLY!\n\n")
1475  sys.exit(1)
1476 
1477  # Record upload information in upload flag file
1478  uploadflag = dbfile[0]+'.uploaded'
 1479  status = os.system('echo "`date` %s" >> %s' % (options.dqtag,uploadflag) )
 1480  if status:
1481  print ("ERROR: Uploading DQ flag was successful, but unable to set upload flag", uploadflag)
1482 
1483  sys.exit(0)
1484 
1485 #
1486 # Run beam spot BCID job
1487 #
1488 if cmd=='runBCID' and len(args)==3:
1489  run = args[1]
1490  tag = args[2]
1491 
1492  dataset, dsname = dataset_from_run_and_tag(run, tag)
1493 
 1494  # NOTE: The command below may be executed via a cron job, so we need to set STAGE_SVCCLASS
1495  # explicitly in all cases, since it may not be inherited from the environment.
1496  # NOTE: We pass along the filter setting, but currently we can do --lbperjob only for ESD since for
1497  # other data sets we have only the merged files.
1498  run_jobs(options.bcidjoboptions, dsname, options.bcidtaskname,
1499  {
1500  'cmdjobpreprocessing' : 'export STAGE_SVCCLASS=atlcal; export ATLAS_LOCAL_ROOT_BASE=/cvmfs/atlas.cern.ch/repo/ATLASLocalRootBase',
1501  'SeparateByBCID' : True,
1502  'VertexNtuple' : False,
1503  },
1504  '--files-per-job', 0,
1505  '--match', options.filter,
1506  '--exclude', r'.*\.TMP\.log.*',
 1507  '--postprocsteps', 'BCIDDefaultProcessing')
1508 
1509  sys.exit(0)
1510 
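# Example invocation (hypothetical run/tag), mirroring the command that runBCIDJobs
# composes further below:
#   beamspotman.py -p data18_13TeV -s physics_Main -f '.*\.AOD\..*' \
#     --bcidtaskname BCID.DB_BEAMSPOT.x642 runBCID 358031 f961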
1511 #
1512 # Run BCID jobs for jobs completed at most MAXTIME ago
1513 #
1514 if cmd=='runBCIDJobs' and len(args)<3:
1515  if len(args)==2:
1516  earliestUpdateTime = time.time()-float(args[1])
1517  else:
1518  earliestUpdateTime = 0
1519 
1520  if not options.beamspottag:
1521  sys.exit('ERROR: No beam spot tag specified')
1522 
1523  print ('Running the following BCID tasks for tasks of type %s:\n' % options.runtaskname)
1524  print (' %-10s %s' % ('RUN','BCID TASK'))
1525  print (' %s' % (60*'-'))
1526 
1527  taskList = []
1528  with getTaskManager() as taskman:
1529  for t in taskman.taskIterDict('*',["where TASKNAME like '%s%%'" % options.runtaskname,
1530  "and UPDATED >= ", DbParam(earliestUpdateTime),
1531  "and COOLTAGS like '%%%s%%'" % options.beamspottag,
1532  "order by RUNNR desc"]):
1533  dsname = t['DSNAME']
1534  runnr = t['RUNNR']
1535  taskName = t['TASKNAME']
1536  datatag = taskName.split('.')[-1].split('_')[0]
1537  bcidTaskName = 'BCID.%s.%s' % (taskName,datatag)
1538 
1539  try:
1540  m = next(taskman.taskIterDict('*',['where RUNNR =',DbParam(runnr),'and DSNAME =',DbParam(dsname),'and TASKNAME =',DbParam(bcidTaskName),'order by UPDATED desc']))
1541  print (' %-10s %s'% (runnr,bcidTaskName))
 1542  except StopIteration:
 1543  print (' * %-10s %s'% (runnr,'--- no BCID task found ---'))
 1544  taskList.append(t)
1546 
1547  if not taskList:
1548  print ('\nNo jobs need to be run.\n')
1549  sys.exit(0)
1550 
1551  if not options.batch:
1552  a = input('\nARE YOU SURE [n] ? ')
1553  if a!='y':
1554  sys.exit('ERROR: Running of BCID tasks aborted by user')
1555  print()
1556 
1557  oracle = getT0DbConnection()
1558  for t in taskList:
1559  dsname = t['DSNAME']
1560  runnr = t['RUNNR']
1561  ptag = dsname.split('.')[0]
1562  stream = dsname.split('.')[2]
1563  taskName = t['TASKNAME']
 1564  fulldatatag = taskName.split('.')[-1]  # full processing tag, e.g. 'f961_m2015'; keep all tokens so merge steps are visible
 1565  datatag = fulldatatag.split('_')[0]
 1566  bcidTaskName = 'BCID.%s.%s' % (taskName,datatag)
 1567 
 1568  filter = 'AOD'
 1569  if any(tok[0] == 'm' for tok in fulldatatag.split('_')):
1570  t0dsname = '%s.merge.%s.%s%%' % (dsname, filter, datatag)
1571  else:
1572  t0dsname = '%s.recon.%s.%s%%' % (dsname, filter, datatag)
1573 
1574  c = getJobConfig('.',dsname,taskName)
1575  if 'ESD' in c['inputfiles'][0]:
1576  filter = 'ESD'
1577  t0dsname = '%s.recon.ESD.%s' % (dsname, datatag)
1578 
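 # Worked example (hypothetical names): for dsname='data18_13TeV.00358031.physics_Main'
 # and datatag='f961', a merged AOD dataset is looked up as
 #   'data18_13TeV.00358031.physics_Main.merge.AOD.f961%'
 # where the trailing '%' is the SQL wildcard used in the Tier-0 query below.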
1579  print ('\nRunning BCID job for run %s:' % runnr)
1580 
1581  print ('... Querying T0 database for replication of %s' % t0dsname)
1582  cur = oracle.cursor()
1583  cur.execute("select DATASETNAME,PSTATES from DATASET where DATASETNAME like '%s' and PSTATES like '%%replicate:done%%'" % t0dsname)
1584  r = cur.fetchall()
1585  if not r:
1586  print (' WARNING: input data not yet replicated - please retry later')
1587  else:
1588  print (' ',r)
1589  print ('... Submitting BCID task')
1590  cmd = 'beamspotman.py -p %s -s %s -f \'.*\\.%s\\..*\' --bcidtaskname %s runBCID %i %s' % (ptag,stream,filter,'BCID.'+taskName,int(runnr),datatag)
1591  print (' %s' % cmd)
1592  sys.stdout.flush()
1593  status = os.system(cmd) >> 8 # Convert to standard Unix exit code
1594  if status:
 1595  print ('\nERROR: Job submission returned error - exit code %i\n' % status)
1596 
1597  print()
1598  sys.exit(0)
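# Note on the '>> 8' above: on POSIX, os.system() returns the raw wait status, with the
# child's exit code in the high byte. For example, a job exiting with code 1 makes
# os.system() return 256, and 256 >> 8 == 1.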
1599 #
1600 # Create an sqlite file containing a MC (OFLP200) tag a la beamSpot_set.py
 1601  # Need -- before positional arguments to deal with -ve values
1602 # beamspotman.py -t IndetBeampos-7TeV-PeriodD-SmallWidth-001 mctag -- 0 0.1 1.1 -5 0.045 0.048 63 0.00043 -4e-05 9e-05
1603 #
 1604  if cmd=='mctag' and 8 <= len(args) < 12:
1605 
 1606  from InDetBeamSpotExample.COOLUtils import openBeamSpotDbFile, writeBeamSpotEntry
1607 
1608  if not options.beamspottag:
1609  sys.exit('ERROR: No beam spot tag specified')
1610 
1611  dbfile=options.beamspottag + '.db'
1612  folderHandle = openBeamSpotDbFile(dbfile, dbName = 'OFLP200', forceNew = True)
1613 
1614  runMin = options.runMin if options.runMin is not None else 0
1615  runMax = options.runMax if options.runMax is not None else (1 << 31)-1
1616 
1617  writeBeamSpotEntry(folderHandle, tag=options.beamspottag,
1618  runMin=runMin, runMax=runMax,
1619  status=int(args[1]),
1620  posX=float(args[2]), posY=float(args[3]), posZ=float(args[4]),
1621  sigmaX=float(args[5]), sigmaY=float(args[6]), sigmaZ=float(args[7]),
1622  tiltX=float(args[8]) if len(args)>8 else 0.,
1623  tiltY=float(args[9]) if len(args)>9 else 0.,
1624  sigmaXY=float(args[10]) if len(args)>10 else 0.,
1625  posXErr=0., posYErr=0., posZErr=0.,
1626  sigmaXErr=0., sigmaYErr=0., sigmaZErr=0.,
1627  tiltXErr=0., tiltYErr=0.,
1628  sigmaXYErr=0.)
1629 
1630  print ('* MC beamspot tag written to db=OFLP200, tag=%s in %s ' %(options.beamspottag, dbfile))
1631  print (' - AtlCoolConsole.py "sqlite://;schema=' + dbfile + ';dbname=OFLP200"')
1632  print ('* To upload to oracle use:')
1633  print (' - beamspotman.py --srctag %s -t %s --srcdbname OFLP200 --destdbname OFLP200 upload %s' %(options.beamspottag, options.beamspottag, dbfile))
1634  sys.exit(0)
1635 
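# A minimal sketch (hypothetical tag and values) of adding a second IOV to the same
# SQLite file with the COOLUtils helpers used above; argument names follow the calls above.
#   folderHandle = openBeamSpotDbFile('IndetBeampos-Example-001.db', dbName='OFLP200')
#   writeBeamSpotEntry(folderHandle, tag='IndetBeampos-Example-001',
#                      runMin=300000, runMax=300999, status=0,
#                      posX=0.0, posY=0.0, posZ=0.0,
#                      sigmaX=0.015, sigmaY=0.015, sigmaZ=42.0)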
1636 #
1637 # Create an sqlite file containing a Data (CONDBR2) tag a la beamSpot_set.py
 1638  # Need -- before positional arguments to deal with -ve values
1639 # beamspotman.py -t IndetBeampos-7TeV-PeriodD-SmallWidth-001 maketag -- 0 0.1 1.1 -5 0.045 0.048 63 0.00043 -4e-05 9e-05
1640 #
 1641  if cmd=='maketag' and 8 <= len(args) < 12:
1642 
 1643  from InDetBeamSpotExample.COOLUtils import openBeamSpotDbFile, writeBeamSpotEntry
1644 
1645  if not options.beamspottag:
1646  sys.exit('ERROR: No beam spot tag specified')
1647 
1648  dbfile=options.beamspottag + '.db'
 1649  dbName = options.destdbname
 1650  folderHandle = openBeamSpotDbFile(dbfile, dbName = dbName, forceNew = True)
1651 
1652  runMin = options.runMin if options.runMin is not None else 0
1653  runMax = options.runMax if options.runMax is not None else (1 << 31)-1
1654 
1655  writeBeamSpotEntry(folderHandle, tag=options.beamspottag,
1656  runMin=runMin, runMax=runMax,
1657  status=int(args[1]),
1658  posX=float(args[2]), posY=float(args[3]), posZ=float(args[4]),
1659  sigmaX=float(args[5]), sigmaY=float(args[6]), sigmaZ=float(args[7]),
1660  tiltX=float(args[8]) if len(args)>8 else 0.,
1661  tiltY=float(args[9]) if len(args)>9 else 0.,
1662  sigmaXY=float(args[10]) if len(args)>10 else 0.,
1663  posXErr=0., posYErr=0., posZErr=0.,
1664  sigmaXErr=0., sigmaYErr=0., sigmaZErr=0.,
1665  tiltXErr=0., tiltYErr=0.,
1666  sigmaXYErr=0.)
1667 
1668  print ('* Beamspot tag written to db=%s, tag=%s in %s ' %(dbName,options.beamspottag, dbfile))
1669  print (' - AtlCoolConsole.py "sqlite://;schema=' + dbfile + ';dbname=%s"' %(dbName))
1670  print ('* To upload to oracle use:')
1671  print (' - beamspotman.py --srctag %s -t %s --srcdbname %s --destdbname %s upload %s' %(options.beamspottag, options.beamspottag, dbName, dbName, dbfile))
1672  print (' - /afs/cern.ch/user/a/atlcond/utils22/AtlCoolMerge.py --nomail %s %s ATLAS_COOLWRITE ATLAS_COOLOFL_INDET_W <passwd>' %(dbfile, dbName))
1673  sys.exit(0)
1674 
1675 
1676 print ('ERROR: Illegal command or number of arguments ({})'.format(' '.join(args)))
1677 sys.exit(1)