ATLAS Offline Software
beamspotman.py
1 #!/usr/bin/env python
2 
3 # Copyright (C) 2002-2025 CERN for the benefit of the ATLAS collaboration
4 
5 
6 """
7 beamspotman is a command line utility to do typical beam spot related tasks.
8 """
9 __authors__ = ['Juerg Beringer', 'Carl Suster']
10 __version__ = 'beamspotman.py atlas/athena'
11 __usage__ = '''%prog [options] command [args ...]
12 
13 Commands are:
14 
15 show RUNNR                               For given run, show data on CAF and beam spot jobs
16 run RUNNR TAG                            Run standard beam spot reconstruction job
17 runCmd DSNAME TASKNAME CMD               Repeat command for matching tasks (use %(DSNAME)s and
18                                          %(TASKNAME)s in the command string CMD to specify the
19                                          dataset and task names, if needed)
20 runMon RUNNR TAG                         Run standard beam spot monitoring job
21 runMonJobs [MAXTIME]                     Run monitoring jobs for jobs completed at most MAXTIME ago
22 postproc                                 Default postprocessing for any task that needs it
23 postproc DSNAME TASKNAME [WHATLIST]      Postprocessing for a selected set of tasks
24 upload DBFILE                            Upload SQLite file into COOL (independent of task)
25 upload DSNAME TASKNAME                   Upload result of beam spot determination into COOL
26 dq2get DSNAME TASKNAME                   Retrieve task data from grid job (must have set up grid env.)
27 queryT0 DSNAME TASKNAME                  Query Tier-0 database about a task
28 backup DIR                               Backup directory DIR (may contain wildcards) to EOS
29 archive DSNAME TASKNAME                  Archive task data (deletes files after copying to EOS,
30                                          sets on-disk status to ARCHIVED)
31 lsbackup                                 List contents of backup directory on EOS
32 reproc TEMPLATE DSNAME TASKNAME INPUTDIR Run reprocessing task consisting of several jobs split into 5 LBs
33                                          over the files in INPUTDIR
34 runaod TEMPLATE DSNAME TASKNAME INPUTDIR Run over AOD, splitting jobs into sets of N LBs (similar
35                                          to reproc command, except for variable params)
36 resubmit DSNAME TASKNAME                 Rerun jobs of a specific task (choose queue with -q QUEUE)
37 dqflag DBFILE                            Upload DQ SQLite file into COOL (independent of task)
38 dqflag DSNAME TASKNAME                   Upload result of beam spot DQ flag determination into COOL
39 runBCID RUNNR TAG                        Run standard beam spot BCID job
40 runBCIDJobs [MAXTIME]                    Run BCID jobs for jobs completed at most MAXTIME ago
41 mctag STATUS POSX POSY POSZ              Create an sqlite file containing a MC tag with the
42       SIGMAX SIGMAY SIGMAZ               parameters given (and zero errors). A la beamSpot_set.py.
43       [TILTX TILTY SIGMAXY]
44 
45 
46 '''
47 
48 proddir = '/afs/cern.ch/user/a/atlidbs/jobs'
49 produserfile = '/afs/cern.ch/user/a/atlidbs/private/produsers.dat'
50 prodcoolpasswdfile = '/afs/cern.ch/user/a/atlidbs/private/coolinfo.dat'
51 flaskcoolpasswdfile = '/afs/cern.ch/user/a/atlidbs/private/flaskinfo.dat'
52 proddqcoolpasswdfile = '/afs/cern.ch/user/a/atlidbs/private/cooldqinfo.dat'
53 tier0dbinfofile = '/afs/cern.ch/user/a/atlidbs/private/t0dbinfo.dat'
54 beamspottag = ''
55 backuppath = '/eos/atlas/atlascerngroupdisk/phys-beamspot/jobs/backup'
56 archivepath = '/eos/atlas/atlascerngroupdisk/phys-beamspot/jobs/archive'
57 
58 import sys, os, stat, glob
59 import re
60 import subprocess
61 import time
62 from copy import copy
63 
64 from InDetBeamSpotExample.TaskManager import *   # TaskManager, getFullTaskNames, TaskManagerCheckError, DbParam, appendUnique, getKey, getJobConfig
65 from InDetBeamSpotExample.PostProcessing import doPostProcessing
66 from InDetBeamSpotExample import BeamSpotPostProcessing
67 from InDetBeamSpotExample import COOLUtils
68 from InDetBeamSpotExample import DiskUtils
69 
70 from future import standard_library
71 standard_library.install_aliases()
72 
73 from optparse import Option, OptionParser, OptionGroup
74 def check_commsep(option, opt, value):
75  return re.split(r'\s*,\s*|\s+', value)
76 class BeamSpotOption(Option):
77  TYPES = Option.TYPES + ('commsep',)
78  TYPE_CHECKER = copy(Option.TYPE_CHECKER)
79  TYPE_CHECKER['commsep'] = check_commsep
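# Illustrative example (not part of the original script): the 'commsep' type defined
# above accepts values separated by commas and/or whitespace, e.g.
#   >>> check_commsep(None, '-z', 'JobPostProcessing,LinkResults')
#   ['JobPostProcessing', 'LinkResults']
#   >>> check_commsep(None, '-z', 'A, B  C')
#   ['A', 'B', 'C']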
80 
81 parser = OptionParser(usage=__usage__, version=__version__, option_class=BeamSpotOption)
82 
83 g_input = OptionGroup(parser, 'Input file options')
84 g_input.add_option('', '--eos', dest='eos', default=False, action='store_true', help='access files over EOS (not needed if /eos is mounted)')
85 g_input.add_option('-e', '--eospath', dest='eospath', default='/eos/atlas/atlastier0/rucio', help='eos path (excluding project and stream name)')
86 g_input.add_option('-p', '--project', dest='project', default='data17_13TeV', help='project name')
87 g_input.add_option('-s', '--stream', dest='stream', default='calibration_BeamSpot', help='stream name')
88 g_input.add_option('-f', '--filter', dest='filter', default=r'.*\.AOD\..*', help='regular expression to filter input files')
89 g_input.add_option('', '--lbfilemap', dest='lbfilemap', default='', help='text file with mapping between filename and lumi blocks')
90 g_input.add_option('', '--rucio', dest='rucio', action='store_true', default=False, help='rucio directory structure')
91 g_input.add_option('', '--dpdinput', dest='dpdinput', action='store_true', default=False, help='Run over DPD for runaod')
92 g_input.add_option('-l', '--filelist', dest='filelist', default=None, help='Explicit list of files for reproc command')
93 g_input.add_option('', '--removedups', dest='removedups', action='store_true', default=False, help='Remove duplicate retry files of form root.N, keeping the latest')
94 parser.add_option_group(g_input)
95 
96 g_mode = OptionGroup(parser, 'Mode flags')
97 g_mode.add_option('-b', '--batch', dest='batch', action='store_true', default=False, help='batch mode - never ask for confirmation')
98 g_mode.add_option('', '--test', dest='testonly', action='store_true', default=False, help='for runaod, show only options and input files')
99 g_mode.add_option('', '--expertmode', dest='expertmode', action='store_true', default=False, help='expert mode (BE VERY CAREFUL)')
100 g_mode.add_option('', '--ignoremode', dest='ignoremode', default='', help='ignore update protection mode (needs expert pwd)')
101 parser.add_option_group(g_mode)
102 
103 # Other options:
104 parser.add_option('-t', '--beamspottag', dest='beamspottag', default=beamspottag, help='beam spot tag')
105 parser.add_option('-d', '--dbconn', dest='dbconn', default='', help='task manager database connection string (default: check TASKDB, otherwise use sqlite_file:taskdata.db)')
106 parser.add_option('', '--proddir', dest='proddir', default=proddir, help='production directory (default: %s)' % proddir)
107 parser.add_option('', '--destdbname', dest='destdbname', default='CONDBR2', help='destination database instance name (default: CONDBR2)')
108 parser.add_option('', '--srcdbname', dest='srcdbname', default='BEAMSPOT', help='source database instance name (default: BEAMSPOT)')
109 parser.add_option('', '--srctag', dest='srctag', default='nominal', help='source tag (default: nominal)')
110 parser.add_option('-r', '--runjoboptions', dest='runjoboptions', default='InDetBeamSpotExample/VertexTemplate.py', help='template to run beam spot jobs')
111 parser.add_option('', '--runtaskname', dest='runtaskname', default='VTX', help='task name')
112 parser.add_option('-m', '--monjoboptions', dest='monjoboptions', default='runBeamSpotMonitor.py', help='template to run monitoring jobs')
113 parser.add_option('', '--bcidjoboptions', dest='bcidjoboptions', default='InDetBeamSpotExample/VertexTemplate.py', help='template to run BCID jobs')
114 parser.add_option('', '--montaskname', dest='montaskname', default='MON', help='task name')
115 parser.add_option('', '--bcidtaskname', dest='bcidtaskname', default='BCID', help='BCID task name')
116 parser.add_option('-g', '--griduser', dest='griduser', default='user10.JuergBeringer', help='grid user name prefix (e.g. user10.JuergBeringer)')
117 parser.add_option('-n', '--nowildcards', dest='nowildcards', action='store_true', default=False, help='do not add wildcards when looking up dataset and task names')
118 parser.add_option('-x', '--excludeiftask', dest='excludeiftask', default='', help='Exclude running cmd with runCmd if such task exists already')
119 parser.add_option('', '--excludeds', dest='excludeds', default='', help='Exclude running cmd with runCmd for dataset containing a certain string')
120 parser.add_option('', '--archive', dest='archive', action='store_true', default=False, help='archive, ie delete data after backup')
121 parser.add_option('', '--incremental', dest='incremental', action='store_true', default=False, help='incremental backup')
122 parser.add_option('', '--lbperjob', dest='lbperjob', type='int', default=5, help='number of luminosity blocks per job (default: 5)')
123 parser.add_option('', '--params', dest='params', default='', help='job option parameters to pass to job option template')
124 parser.add_option('-z', '--postprocsteps', dest='postprocsteps', type='commsep', default=['JobPostProcessing'], help='Task-level postprocessing steps, comma-separated (Default: JobPostProcessing)')
125 parser.add_option('', '--srcdqtag', dest='srcdqtag', default='nominal', help='source DQ tag (default: nominal)')
126 parser.add_option('', '--dqtag', dest='dqtag', default='HEAD', help='beam spot DQ tag')
127 parser.add_option('', '--destdqdbname', dest='destdqdbname', default='CONDBR2', help='DQ destination database instance name (default: CONDBR2)')
128 parser.add_option('', '--srcdqdbname', dest='srcdqdbname', default='IDBSDQ', help='DQ source database instance name (default: IDBSDQ)')
129 parser.add_option('', '--dslist', dest='dslist', default='', help='Exclude running cmd with runCmd if the dataset is not in this file')
130 parser.add_option('', '--nominbias', dest='nominbias', action='store_true', default=False, help='overwrite MinBias_physics in DSNAME with express_express')
131 parser.add_option('', '--resultsondisk', dest='resultsondisk', action='store_true', default=False, help='Leave the results on disk when archiving')
132 parser.add_option('', '--pLbFile', dest='pseudoLbFile', default=None, help='File for pseudo LB info from scan')
133 parser.add_option('', '--prefix', dest='prefix', default='', help='Prefix for reading files from mass storage (default: determined from filename)')
134 parser.add_option('', '--rl', dest='runMin', type='int', default=None, help='Minimum run number for mctag (inclusive)')
135 parser.add_option('', '--ru', dest='runMax', type='int', default=None, help='Maximum run number for mctag (inclusive)')
136 parser.add_option('', '--useRun', dest='useRun', type='int', default=None, help='Run monitoring job for a given run only')
137 parser.add_option('', '--noCheckAcqFlag', dest='noCheckAcqFlag', action='store_true', default=False, help='Don\'t check acqFlag when submitting VdM jobs')
138 parser.add_option('', '--resubAll', dest='resubAll', action='store_true', default=False, help='Resubmit all jobs irrespective of status')
139 parser.add_option('', '--resubRunning', dest='resubRunning', action='store_true', default=False, help='Resubmit all "running" jobs')
140 parser.add_option('-q', '--queue', dest='batch_queue', default=None, help='Name of batch queue to use (default is context-specific)')
141 
142 g_deprecated = OptionGroup(parser, 'Deprecated Options')
143 g_deprecated.add_option('', '--mon', dest='legacy_mon', action='store_true', default=False, help='mon directory structure (now inferred from montaskname)')
144 parser.add_option_group(g_deprecated)
145 
146 (options, args) = parser.parse_args()
147 if len(args) < 1: parser.error('wrong number of command line arguments')
148 cmd = args[0]
149 cmdargs = args[1:]
150 
151 # General error checking (skipped in expert mode to allow testing)
152 if not options.expertmode:
153  if os.path.realpath(os.getcwd()) != os.path.realpath(options.proddir):
154  sys.exit('ERROR: You must run this command in the production directory %s' % options.proddir)
155  if not os.path.exists(produserfile):
156  sys.exit('ERROR: Authorization file unreadable or does not exist: %s' % produserfile)
157  if not subprocess.getoutput('grep `whoami` %s' % produserfile):
158  sys.exit('ERROR: You are not authorized to run this command (user name must be listed in produser file %s)' % produserfile)
159 else:
160  if os.path.realpath(os.getcwd()) != os.path.realpath(options.proddir):
161  print ('WARNING: You are not running in the production directory %s' % options.proddir)
162 
163 
164 #
165 # Utilities
166 #
167 def getT0DbConnection():
168  try:
169  with open(tier0dbinfofile, 'r') as dbinfofile:
170  connstring = dbinfofile.read().strip()
171  except:
172  sys.exit('ERROR: Unable to read connection information for Tier-0 database')
173  dbtype, dbname = connstring.split(':',1)
174  if dbtype!='oracle':
175  sys.exit('ERROR: Invalid T0 connection string')
176  import cx_Oracle
177  try:
178  oracle = cx_Oracle.connect(dbname)
179  except:
180  print ('ERROR: First connection attempt to Tier-0 Oracle database failed; will retry in 10s ...')
181  try:
182  time.sleep(10)
183  oracle = cx_Oracle.connect(dbname)
184  except Exception as e:
185  print (e)
186  sys.exit('ERROR: Unable to connect to Tier-0 Oracle database')
187  if not oracle:
188  sys.exit('ERROR: Unable to connect to Tier-0 Oracle database (invalid cx_Oracle connection)')
189  return oracle
190 
191 def getTaskManager():
192  ''' Open task manager (used by all following commands, at the very least through subcommands) '''
193  try:
194  return TaskManager(options.dbconn)
195  except:
196  print ('ERROR: Unable to access task manager database %s' % options.dbconn)
197  sys.exit(1)
198 
199 def fail(message):
200  print()
201  print ('ERROR:', message)
202  sys.exit(1)
203 
204 def dataset_from_run_and_tag(run, tag):
205  ''' Given a run number and tag, check input dataset and work out name. '''
206  fs = DiskUtils.FileSet.from_ds_info(run,
207  project=options.project,
208  stream=options.stream,
209  base=options.eospath)
210  datasets = list(fs
211  .strict_mode()
212  .use_files_from(options.filelist)
213  .matching(options.filter + tag + '.*')
214  .excluding(r'.*\.TMP\.log.*')
215  .only_single_dataset())
216  dataset = os.path.dirname(datasets[0])
217  dsname = '.'.join(os.path.basename(datasets[0]).split('.')[:3])
218  return (dataset, dsname)
219 
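# Illustrative example (hypothetical run/tag; assumes the default --project and
# --stream options): for run 358031 and tag 'f956', the single matching dataset
# might be
#   .../data17_13TeV/calibration_BeamSpot/.../data17_13TeV.00358031.calibration_BeamSpot.recon.AOD.f956/...
# and the function would return
#   dataset = <directory of the matching dataset files>
#   dsname  = 'data17_13TeV.00358031.calibration_BeamSpot'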
220 def run_jobs(script, ds_name, task_name, params, *args):
221  ''' Invoke runJobs.py '''
222  arg_list = ['runJobs']
223  arg_list.extend(map(str, args))
224  if params:
225  param_args = []
226  for k,v in params.items():
227  param_args.append("{}={}".format(k,repr(v)))
228  arg_list.extend(['--params', ', '.join(param_args)])
229  if options.testonly:
230  arg_list.append('--test')
231  arg_list.extend([script, ds_name, task_name])
232 
233  print (subprocess.list2cmdline(arg_list))
234  subprocess.check_call(arg_list)
235 
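# Illustrative example (hypothetical names): a call such as
#   run_jobs('InDetBeamSpotExample/VertexTemplate.py', 'data17_13TeV.00358031.calibration_BeamSpot',
#            'VTX-LR5.f956', {'LumiRange': 5}, '--files-per-job', 0)
# prints and executes the command line
#   runJobs --files-per-job 0 --params 'LumiRange=5' InDetBeamSpotExample/VertexTemplate.py data17_13TeV.00358031.calibration_BeamSpot VTX-LR5.f956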
236 #
237 # Upload any SQLite file to COOL (independent of task, w/o book keeping)
238 #
239 if cmd == 'upload' and len(cmdargs) == 1:
240  dbfile = args[1]
241  if not options.beamspottag:
242  fail('No beam spot tag specified')
243  try:
244  with open(prodcoolpasswdfile, 'r') as passwdfile:
245  passwd = passwdfile.read().strip()
246  except:
247  fail('Unable to determine COOL upload password')
248 
249  try:
250  with open(flaskcoolpasswdfile, 'r') as flaskpasswdfile:
251  flaskpasswd = flaskpasswdfile.read().strip()
252  except:
253  fail('Unable to determine FLASK upload password')
254 
255  print()
256  print ('Beam spot file: ', dbfile)
257  print ('Uploading to tag: ', options.beamspottag)
258  os.system('dumpBeamSpot.py -d %s -t %s %s' % (
259  options.srcdbname,
260  options.srctag,
261  dbfile))
262 
263  print()
264  stat = os.system('/afs/cern.ch/user/a/atlcond/utilsproxy/AtlCoolMerge.py --flask --nobackup --client_id cool-flask-beamspot-client --client_secret %s --nomail %s %s --folder /Indet/Beampos --tag %s --retag %s --destdb %s %s %s ATONR_COOLOFL_GPN ATLAS_COOLOFL_INDET_W %s' % (
265  flaskpasswd,
266  '--batch' if options.batch else '',
267  ('--ignoremode %s' % options.ignoremode) if options.ignoremode else '',
268  options.srctag,
269  options.beamspottag,
270  options.destdbname,
271  dbfile,
272  options.srcdbname,
273  passwd))
274 
275  if stat: fail("UPLOADING TO COOL FAILED - PLEASE CHECK CAREFULLY!")
276  sys.exit(0)
277 
278 #
279 # Run beam spot reconstruction jobs
280 #
281 if cmd=='run' and len(args)==3:
282  run = args[1]
283  tag = args[2]
284  dataset, dsname = dataset_from_run_and_tag(run, tag)
285 
286  run_jobs(options.runjoboptions, dsname,
287  '{}-LR5.{}'.format(options.runtaskname, tag),
288  {
289  'LumiRange' : 5,
290  },
291  '--files-per-job', 0,
292  '--match', options.filter,
293  '--exclude', r'.*\.TMP\.log.*',
294  '--directory', dataset)
295  sys.exit(0)
296 
297 
298 #
299 # Run beam spot monitoring job
300 #
301 if cmd=='runMon' and len(args)==3:
302  run = args[1]
303  tag = args[2]
304 
305  if not options.beamspottag:
306  fail('No beam spot tag specified')
307 
308  dataset, dsname = dataset_from_run_and_tag(run, tag)
309 
310  # NOTE: The command below may be executed via a cron job, so we need to set STAGE_SVCCLASS
311  # explicitly in all cases, since it may not be inherited from the environment.
312  if 'ESD' in options.filter:
313  # NOTE: We pass along the filter setting, but currently we can do --lbperjob only for ESD since for
314  # other data sets we have only the merged files.
315  run_jobs(options.monjoboptions, dsname,
316  '{}.{}'.format(options.montaskname, tag),
317  {
318  'cmdjobpreprocessing' : 'export STAGE_SVCCLASS=atlcal; export ATLAS_LOCAL_ROOT_BASE=/cvmfs/atlas.cern.ch/repo/ATLASLocalRootBase',
319  'useBeamSpot' : True,
320  'beamspottag' : options.beamspottag,
321  },
322  '--lbperjob', 10,
323  '--match', options.filter,
324  '--exclude', r'.*\.TMP\.log.*',
325  '--directory', dataset)
326  elif options.filelist is not None:
327  lbinfoinfiles = True
328  for line in open(options.filelist, 'r'):
329  if "lb" not in line:
330  lbinfoinfiles = False
331  break
332  lboptions='--lbperjob=10' if lbinfoinfiles else '--files-per-job=10'
333  run_jobs(options.monjoboptions, dsname, '{}.{}'.format(options.montaskname, tag),
334  {
335  'tracksAlreadyOnBeamLine' : True,
336  'useBeamSpot' : True,
337  'beamspottag' : options.beamspottag,
338  },
339  lboptions,
340  '--match', options.filter,
341  '--exclude', r'.*\.TMP\.log.*',
342  '--directory', dataset,
343  '--queue', '"tomorrow"')
344  else:
345  queue = options.batch_queue or '"tomorrow"'
346  print('Queue: ', queue )
347 
348  params = {
349  'cmdjobpreprocessing' : 'export STAGE_SVCCLASS=atlcal; export ATLAS_LOCAL_ROOT_BASE=/cvmfs/atlas.cern.ch/repo/ATLASLocalRootBase',
350  'useBeamSpot' : True,
351  'beamspottag' : options.beamspottag
352  }
353 
354  # Additional job parameters
355  for s in options.params.split(', '):
356  if s:
357  try:
358  p = s.split('=',1)
359  params[p[0].strip()] = eval(p[1].strip())
360  except:
361  print ('\nERROR parsing user parameter',p,'- parameter will be ignored')
362 
363  run_jobs(options.monjoboptions, dsname, '{}.{}'.format(options.montaskname, tag),params,
364  '--lbperjob', 20,
365  '--match', options.filter,
366  '--exclude', r'.*\.TMP\.log.*',
367  '--directory', dataset,
368  '--queue', queue)
369 
370  sys.exit(0)
371 
372 
373 #
374 # Backup to EOS
375 #
376 if cmd=='backup' and len(args)==2:
377  if options.archive and options.incremental:
378  sys.exit('ERROR: Cannot do incremental archiving')
379  tmplist = glob.glob(args[1]+'/')
380  dirlist = []
381  for dslash in tmplist:
382  d = dslash[:-1] # Remove trailing /
383  if options.incremental:
384  baklog = d+'/backup.log'
385  if os.path.exists(baklog):
386  cmd = 'find %s -maxdepth 1 -newer %s' % (d,baklog)
387  (status,output) = subprocess.getstatusoutput(cmd)
388  status = status >> 8
389  if status:
390  sys.exit("ERROR: Error executing %s" % cmd)
391  if output:
392  dirlist.append(d)
393  else:
394  dirlist.append(d)
395  pass
396  else:
397  dirlist.append(d)
398  print ('\nFound %i directories for backup:\n' % len(dirlist))
399  for d in dirlist:
400  print (' %s' % d)
401  if options.archive:
402  print ('\n****************************************************************')
403  print ('WARNING: ARCHIVING - DIRECTORIES WILL BE DELETED AFTER BACKUP!!!')
404  print ('****************************************************************')
405  if not options.batch:
406  a = input('\nARE YOU SURE [n] ? ')
407  if a!='y':
408  sys.exit('ERROR: Aborted by user')
409  for d in dirlist:
410  tmpdir = '/tmp'
411  if options.archive:
412  outname = d.replace('/','-')+time.strftime('-%G_%m_%d.tar.gz')
413  path = archivepath
414  else:
415  outname = d.replace('/','-')+'.tar.gz'
416  path = backuppath
417 
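 # Illustrative example: for d='data17_13TeV.00358031.calibration_BeamSpot', an archive
 # made on 2017-10-01 would be named
 #   data17_13TeV.00358031.calibration_BeamSpot-2017_10_01.tar.gz
 # whereas a plain (non-archive) backup omits the date suffix.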
418  print ('\nBacking up %s --> %s/%s ...\n' % (d,path,outname))
419 
420  status = os.system('tar czf %s/%s %s' % (tmpdir,outname,d)) >> 8
421  if status:
422  sys.exit('\nERROR: Unable to create local tar file %s/%s' % (tmpdir,outname))
423 
424  status = os.system('xrdcp -f %s/%s root://eosatlas.cern.ch/%s/%s' % (tmpdir,outname,path,outname)) >> 8
425 
426  if status:
427  os.system('rm -rf %s/%s' % (path,outname) )
428  status = os.system('xrdcp -f %s/%s root://eosatlas.cern.ch/%s/%s' % (tmpdir,outname,path,outname)) >> 8
429  if status:
430  # Continue to try other files if one failed to upload to EOS
431  print ('\nERROR: Unable to copy file to EOS path %s/%s' % (path,outname))
432  continue
433 
434  os.system('rm %s/%s' % (tmpdir,outname))
435  status = os.system('echo "`date` %s/%s" >> %s/backup.log' % (path,outname,d)) >> 8
436  if status:
437  sys.exit('\nERROR: Could not update backup log file')
438 
439  if options.archive:
440  print ('\nWARNING: DELETION OF SOURCE FILES NOT YET IMPLEMENTED\n')
441  sys.exit(0)
442 
443 
444 #
445 # List backup directory on EOS
446 #
447 if cmd == 'lsbackup' and not cmdargs:
448  path = archivepath if options.archive else backuppath
449  print ('Backup directory:', path)
450  for f in DiskUtils.from_directory(path):
451  print (f)
452  sys.exit(0)
453 
454 
455 #
456 # Show available data sets
457 #
458 if cmd == 'show' and len(cmdargs)==1:
459  run = cmdargs[0]
460 
461  print ('Base path: ', options.eospath)
462  print ('Project tag: ', options.project)
463  print ('Stream: ', options.stream)
464  print()
465  print ('Files available (filtered by: {}):'.format(options.filter))
466  print ('---------------')
467 
468  fs = DiskUtils.FileSet.from_ds_info(run,
469  project=options.project,
470  stream=options.stream,
471  base=options.eospath)
472  for f in fs.matching(options.filter):
473  print (f)
474 
475  print()
476  print ('Beam spot tasks:')
477  print ('---------------')
478  with getTaskManager() as taskman:
479  for t in taskman.taskIterDict(qual=["where DSNAME like '%%%s%%' order by UPDATED" % run]):
480  print ('%-45s %-45s %4s job(s), last update %s' % (t['DSNAME'],t['TASKNAME'],t['NJOBS'],time.ctime(t['UPDATED'])))
481  sys.exit(0)
482 
483 
484 #
485 # Postprocessing: for all tasks requiring it, or for a selected set of tasks
486 #
487 if cmd == 'postproc' and not cmdargs:
488  with getTaskManager() as taskman:
489  for t in taskman.taskIterDict(
490  qual=['where STATUS > %i and STATUS <= %i order by UPDATED' % (
491  TaskManager.StatusCodes['RUNNING'],
492  TaskManager.StatusCodes['POSTPROCESSING'])]):
493  doPostProcessing(taskman, t, t['TASKPOSTPROCSTEPS'].split(), BeamSpotPostProcessing)
494  sys.exit(0)
495 
496 if cmd == 'postproc' and len(cmdargs) in [2,3]:
497  dsname = cmdargs[0]
498  taskname = cmdargs[1]
499 
500  # for backwards compatibility these are equivalent:
501  # $0 postproc DSNAME TASKNAME POSTPROCSTEPS
502  # $0 -z POSTPROCSTEPS postproc DSNAME TASKNAME
503  steps = options.postprocsteps if len(cmdargs) < 3 else cmdargs[2].split(',')
504  if steps:
505  print ('Executing postprocessing tasks:', steps)
506  else:
507  print ('Executing postprocessing tasks as specified in task database')
508  print()
509 
510  with getTaskManager() as taskman:
511  try:
512  taskList = getFullTaskNames(taskman,
513  dsname,
514  taskname,
515  confirmWithUser=not options.batch,
516  addWildCards=not options.nowildcards)
517  except TaskManagerCheckError as e:
518  fail(e)
519  for taskName in taskList:
520  t = taskman.getTaskDict(taskName[0], taskName[1])
521  if steps:
522  doPostProcessing(taskman, t, steps, BeamSpotPostProcessing, forceRun=True)
523  else:
524  doPostProcessing(taskman, t, t['TASKPOSTPROCSTEPS'].split(), BeamSpotPostProcessing, forceRun=True)
525  sys.exit(0)
526 
527 
528 #
529 # Upload beam spot data for given task to COOL
530 #
531 if cmd=='upload' and len(args)==3:
532  with getTaskManager() as taskman:
533  try:
534  [(dsname,task)] = getFullTaskNames(taskman,args[1],args[2],requireSingleTask=True,confirmWithUser=not options.batch,addWildCards=not options.nowildcards)
535  except TaskManagerCheckError as e:
536  print (e)
537  sys.exit(1)
538  if not options.beamspottag:
539  sys.exit('ERROR: No beam spot tag specified')
540 
541  dbfile = glob.glob('%s/%s/*-beamspot.db' % (dsname,task))
542  if len(dbfile)!=1:
543  print ('ERROR: Missing or ambiguous input COOL file:',dbfile)
544  sys.exit(1)
545 
546  try:
547  with open(prodcoolpasswdfile, 'r') as passwdfile:
548  passwd = passwdfile.read().strip()
549  except:
550  sys.exit('ERROR: Unable to determine COOL upload password')
551 
552  try:
553  with open(flaskcoolpasswdfile, 'r') as flaskpasswdfile:
554  flaskpasswd = flaskpasswdfile.read().strip()
555  except:
556  fail('Unable to determine FLASK upload password')
557 
558  print ('\nData set: ',dsname)
559  print ('Beam spot file: ',dbfile[0])
560  print ('Uploading to tag: ',options.beamspottag)
561  os.system('dumpBeamSpot.py -d %s -t %s %s' % (options.srcdbname,options.srctag,dbfile[0]))
562 
563  if options.ignoremode:
564  ignoremode = '--passopt="--appendlocked --ignoremode %s"' % options.ignoremode
565  else:
566  ignoremode = ''
567  if options.batch:
568  batchmode = '--batch'
569  else:
570  batchmode = ''
571 
572  print('command: /afs/cern.ch/user/a/atlcond/utilsproxy/AtlCoolMerge.py --flask --nobackup --client_id cool-flask-beamspot-client --client_secret <flaskpassword> --nomail %s %s --folder /Indet/Beampos --tag %s --retag %s --destdb %s %s %s ATONR_COOLOFL_GPN ATLAS_COOLOFL_INDET_W <password>' % (batchmode,ignoremode,options.srctag,options.beamspottag,options.destdbname,dbfile[0],options.srcdbname))
573  stat = os.system('/afs/cern.ch/user/a/atlcond/utilsproxy/AtlCoolMerge.py --flask --nobackup --client_id cool-flask-beamspot-client --client_secret %s --nomail %s %s --folder /Indet/Beampos --tag %s --retag %s --destdb %s %s %s ATONR_COOLOFL_GPN ATLAS_COOLOFL_INDET_W %s' % (flaskpasswd,batchmode,ignoremode,options.srctag,options.beamspottag,options.destdbname,dbfile[0],options.srcdbname,passwd))
574 
575  if stat:
576  print ("\n\nERROR: UPLOADING TO COOL FAILED - PLEASE CHECK CAREFULLY!\n\n")
577  sys.exit(1)
578 
579  # Record upload information, both in task DB (field COOLTAGS) and in upload flag file
580  uploadflag = dbfile[0]+'.uploaded'
581  stat = os.system('echo "`date` %s" >> %s' % (options.beamspottag,uploadflag) )
582  if stat:
583  print ("ERROR: Uploading was successful, but unable to set upload flag", uploadflag )
584 
585  cooltags = options.beamspottag
586 
587  # If uploading to the beamspot tag linked to the Current BLK alias, and the Next BLK alias also exists (not '')
588  # and points to a different tag, then AtlCoolMerge will upload to the Next beamspot tag too. Hence record
589  # that in COOLTAGS too. Mimic the logic in AtlCoolMergeLib.py.
590 
591  nextbeamspot = ''
592  try:
593  nextbeamspot = COOLUtils.resolveNextBeamSpotFolder()
594  except:
595  nextbeamspot = ''
596  if nextbeamspot == options.beamspottag:
597  nextbeamspot = ''
598  if not ('UPD' in options.beamspottag and 'UPD' in nextbeamspot):
599  nextbeamspot = ''
600  if nextbeamspot != '':
601  print ('Additionally uploading to Next tag: ',nextbeamspot)
602  cooltags = appendUnique(cooltags, nextbeamspot)
603 
604  # Upload tag(s) to TaskManager
605  t = taskman.getTaskDict(dsname,task)
606  taskman.setValue(dsname,task,'COOLTAGS',appendUnique(t['COOLTAGS'],cooltags))
607 
608  sys.exit(0)
609 
610 
611 #
612 # Retrieve task data from grid job
613 #
614 if cmd=='dq2get' and len(args)==3:
615  try:
616  with getTaskManager() as taskman:
617  [(dsname,task)] = getFullTaskNames(taskman,args[1],args[2],requireSingleTask=True,addWildCards=not options.nowildcards)
618  except TaskManagerCheckError as e:
619  print (e)
620  sys.exit(1)
621  dir = os.path.join(dsname, task)
622  griddsname = '%s.%s-%s' % (options.griduser,dsname,task)
623  path = os.path.join(dir, griddsname)
624  if os.path.exists(path):
625  print ('ERROR: Path exists already:',path)
626  sys.exit(1)
627  print ('Extracting into %s/%s ...' % (dir,griddsname))
628  stat = os.system('cd %s; dq2-get -T 6,6 %s' % (dir,griddsname))
629  if stat:
630  print ("ERROR: Problem occurred while extracting data set - task status not changed")
631 
632  statfile = glob.glob('%s/000/*.status.SUBMITTED' % (dir))
633  if len(statfile)!=1:
634  print ("ERROR: Unable to uniquely identify status - giving up. Candidate status files are:")
635  print (statfile)
636  sys.exit(1)
637  os.system('rm %s' % statfile[0])
638  basename = statfile[0][:-17]
639  os.system('touch %s.exit.0' % basename)
640  os.system('touch %s.COMPLETED' % basename)
641  print ("\nSuccessfully downloaded data set",griddsname)
642  os.system('du -hs %s' % path)
643  print()
644  sys.exit(0)
645 
646 
647 #
648 # Query T0 task status
649 #
650 if cmd=='queryT0' and len(args)==3:
651  try:
652  with getTaskManager() as taskman:
653  [(dsname,task)] = getFullTaskNames(taskman,args[1],args[2],requireSingleTask=True,addWildCards=not options.nowildcards)
654  except TaskManagerCheckError as e:
655  print (e)
656  sys.exit(1)
657  tags = task.split('.')[-1]
658  # FIXME: change the following to automatically determine from the input files of the DB_BEAMSPOT job?
659  if 'ESD' in options.filter:
660  t0TaskName = '%s.recon.ESD.%s.beamspotproc.task' % (dsname,tags)
661  else:
662  if any(t[0] == 'm' for t in tags.split('_')):
663  t0TaskName = '%s.merge.AOD.%s.beamspotproc.task' % (dsname,tags)
664  else:
665  t0TaskName = '%s.recon.AOD.%s.beamspotproc.task' % (dsname,tags)
666 
667  print ('Querying Tier-0 database for task',t0TaskName,'...')
668  oracle = getT0DbConnection()
669  cur = oracle.cursor()
670  try:
671  #sql = str("SELECT status FROM etask WHERE taskname='%s' AND creationtime>sysdate-10" % t0TaskName) # Oracle doesn't want unicode
672  sql = str("SELECT status FROM tasks WHERE taskname='%s' AND tasktype='beamspotproc'" % t0TaskName) # Oracle doesn't want unicode
673  cur.execute(sql)
674  r = cur.fetchall()
675  except Exception as e:
676  print (e)
677  sys.exit('ERROR: Unable to retrieve status of task %s' % t0TaskName)
678  if len(r)==0:
679  sys.exit('ERROR: No such task found: %s' % t0TaskName)
680  if len(r)>1:
681  sys.exit('ERROR: %i tasks found - unable to determine task status' % len(r))
682  status = r[0][0]
683  print ('\nTask status = %s\n' % status)
684  if status=='FINISHED' or status=='TRUNCATED':
685  sys.exit(0)
686  else:
687  sys.exit(2)
688 
689 
690 #
691 # Run command over set of matching tasks
692 #
693 if cmd=='runCmd' and len(args)==4:
694  dssel = args[1]
695  tasksel = args[2]
696  cmd = args[3]
697  if options.nowildcards:
698  qual = ["where DSNAME like '%s' and TASKNAME like '%s'" % (dssel,tasksel)]
699  else:
700  qual = ["where DSNAME like '%%%s%%' and TASKNAME like '%%%s%%'" % (dssel,tasksel)]
701  if options.beamspottag:
702  qual.append("and COOLTAGS like '%%%s%%'" % options.beamspottag)
703  #qual.append("order by RUNNR desc")
704  qual.append("order by RUNNR")
705  print ('Running command\n')
706  print (' ',cmd % ({'DSNAME': 'DSNAME', 'TASKNAME': 'TASKNAME', 'RUNNR': 'RUNNR', 'FULLDSNAME': 'FULLDSNAME'}))
707  print ('\nover the following datasets / tasks:\n')
708  print (' %-10s %-40s %s' % ('Run','Dataset','Task'))
709  print (' %s' % (74*'-'))
710 
711  taskList = []
712  with getTaskManager() as taskman:
713  for t in taskman.taskIterDict('*',qual):
714  dsname = t['DSNAME']
715 
716  # Deal with express vs minBias in 900 GeV runs
717  if options.nominbias and 'physics_MinBias' in dsname:
718  print ('Warning: changing physics_MinBias in dsname to express_express')
719  dsname = dsname.replace('physics_MinBias','express_express')
720  t['DSNAME'] = dsname
721 
722  runnr = t['RUNNR']
723  taskname = t['TASKNAME']
724  print (' %-10s %-40s %s'% (runnr,dsname,taskname))
725 
726  if options.excludeiftask:
727  if options.nowildcards:
728  # n = taskman.getNTasks(['where DSNAME =',DbParam(dsname),'and TASKNAME like',DbParam(options.excludeiftask)])
729  n = taskman.getNTasks(["where DSNAME ='%s' and TASKNAME like '%s'" % (dsname,options.excludeiftask)])
730  else:
731  n = taskman.getNTasks(["where DSNAME ='%s' and TASKNAME like '%%%s%%'" % (dsname,options.excludeiftask)])
732  if n!=0:
733  print (' ==> SKIPPING - %i matching tasks found' % n)
734  continue
735 
736  # Test all excluded strings at once so that 'continue' applies to the task loop
737  if options.excludeds and any(s in dsname for s in options.excludeds.split(',')):
738  print (' ==> SKIPPING dataset')
739  continue
742 
743  if options.dslist:
744  stat, out = subprocess.getstatusoutput('grep -c %s %s' % (dsname, options.dslist))
745  if stat==2: # Missing file etc
746  print (out)
747  sys.exit(1)
748 
749  try:
750  if int(out)==0:
751  print (' ==> SKIPPING - dataset %s not found in %s' % (dsname, options.dslist))
752  continue
753  except ValueError: # Some other error
754  print (out)
755  sys.exit(1)
756 
757  del stat, out
758  # Find full dataset name from file
759  stat, out = subprocess.getstatusoutput('grep %s %s' % (dsname, options.dslist))
760  if stat==2: # Missing file etc
761  print (out)
762  sys.exit(1)
763 
764  t['FULLDSNAME'] = out.strip()
765 
766  taskList.append(t)
767 
768  if not taskList:
769  print ('\nNo jobs need to be run.\n')
770  sys.exit(0)
771 
772  print ('\n%i datasets / tasks selected.\n' % len(taskList))
773  if not options.batch:
774  a = input('\nARE YOU SURE [n] ? ')
775  if a!='y':
776  sys.exit('ERROR: Running of monitoring tasks aborted by user')
777  print()
778  for t in taskList:
779  fullcmd = cmd % t
780  print ('\nExecuting: ',fullcmd,' ...')
781  sys.stdout.flush()
782  os.system(fullcmd)
783 
784  print()
785  sys.exit(0)
786 
787 
788 #
789 # Run monitoring jobs for jobs completed at most MAXTIME ago
790 #
791 if cmd=='runMonJobs' and len(args)<3:
792  if len(args)==2:
793  earliestUpdateTime = time.time()-float(args[1])
794  else:
795  earliestUpdateTime = 0
796  if not options.beamspottag:
797  sys.exit('ERROR: No beam spot tag specified')
798 
799  print ('Running the following monitoring tasks for tasks of type %s:\n' % options.runtaskname)
800  print (' %-10s %s' % ('RUN','MONITORING TASK'))
801  print (' %s' % (60*'-'))
802 
803  onDiskCode = TaskManager.OnDiskCodes['ALLONDISK']
804  taskList = []
805  with getTaskManager() as taskman:
806  for t in taskman.taskIterDict('*',["where TASKNAME like '%s%%'" % options.runtaskname,
807  "and UPDATED >= ", DbParam(earliestUpdateTime),
808  "and ONDISK = ", DbParam(onDiskCode),
809  "and COOLTAGS like '%%%s%%'" % options.beamspottag,
810  "order by RUNNR desc"]):
811  dsname = t['DSNAME']
812  runnr = t['RUNNR']
813  taskName = t['TASKNAME']
814  datatag = taskName.split('.')[-1].split('_')[0]
815  monTaskName = 'MON.%s.%s' % (taskName,datatag)
816  useRun = True
817  if options.useRun is not None:
818  useRun = False
819  if runnr == options.useRun:
820  useRun = True
821 
822  if useRun:
823  try:
824  m = next(taskman.taskIterDict('*',["where RUNNR =",DbParam(runnr),"and DSNAME =",DbParam(dsname),"and TASKNAME =",DbParam(monTaskName),"order by UPDATED desc"]))
825  print (' %-10s %s %s'% (runnr,dsname,monTaskName))
826  except:
827  print (' * %-10s %s %s'% (runnr,dsname,'--- no monitoring task found ---'))
828  taskList.append(t)
829  pass
830 
831  if not taskList:
832  print ('\nNo jobs need to be run.\n')
833  sys.exit(0)
834 
835  if not options.batch:
836  a = input('\nARE YOU SURE [n] ? ')
837  if a!='y':
838  sys.exit('ERROR: Running of monitoring tasks aborted by user')
839  print()
840 
841  oracle = getT0DbConnection()
842  for t in taskList:
843  dsname = t['DSNAME']
844  runnr = t['RUNNR']
845  ptag = dsname.split('.')[0]
846  stream = dsname.split('.')[2]
847  taskName = t['TASKNAME']
848  fulldatatag = taskName.split('.')[-1]
849  datatag = fulldatatag.split('_')[0]
850  monTaskName = 'MON.%s' % (taskName)
851 
852  # Now that beamspot folder tag can change, the tag to be monitored must be the one the parent task uploaded to.
853  # If more than one tag (might be case if upload to current/next) then take first (should be same)
854  cooltags = t['COOLTAGS']
855  if not cooltags: cooltags = ''
856  bstag = cooltags.split()[0]
857 
858  filter = 'AOD'
859  if any(t[0] == 'm' for t in fulldatatag.split('_')):
860  t0dsname = '%s.merge.AOD.%s%%' % (dsname, datatag)
861  else:
862  t0dsname = '%s.recon.AOD.%s%%' % (dsname, datatag)
863 
864  c = getJobConfig('.',dsname,taskName)
865  if 'ESD' in c['inputfiles'][0]:
866  filter = 'ESD'
867  t0dsname = '%s.recon.ESD.%s' % (dsname, datatag)
868 
869  print ('\nRunning monitoring job for run %s:' % runnr)
870 
871  submitjob=True
872  eospath=options.eospath
873  # for old runs we would need to check if the dataset had been replicated at Tier-0.
874  # with EOS this is no longer necessary.
875  r=[]
876  if int(runnr)<240000:
877  print ('... Querying T0 database for replication of %s' % t0dsname)
878  cur = oracle.cursor()
879  cur.execute("select DATASETNAME,PSTATES from DATASET where DATASETNAME like '%s' and PSTATES like '%%replicate:done%%'" % t0dsname)
880  r = cur.fetchall()
881  if not r:
882  print (' WARNING: input data not yet replicated - please retry later')
883  submitjob=False
884  else:
885  print ('... Querying T0 database for completion of merging jobs of %s' % t0dsname)
886  cur = oracle.cursor()
887  origt0TaskName='%s.recon.AOD.%s%%.aodmerge.task' % (dsname,datatag)
888  cur.execute("select status from tasks where taskname like '%s' and tasktype='aodmerge'" % origt0TaskName)
889  r = cur.fetchall()
890  if not r:
891  print (' WARNING: can\'t get status of merge job for %s, running on un-merged samples instead' % origt0TaskName)
892  eospath='/eos/atlas/atlastier0/tzero/prod'
893  elif not (r[0][0]=='FINISHED' or r[0][0]=='TRUNCATED'):
894  print (' Merge job for taskname %s is not finished yet, has status %s, running on un-merged samples instead.' % (origt0TaskName, r[0][0]))
895  eospath='/eos/atlas/atlastier0/tzero/prod'
896  else:
897  print (' Merge job is finished, launching jobs.')
898  submitjob=True
899 
900  if submitjob:
901  if int(runnr)<240000:
902  print (' ',r)
903  print ('... Submitting monitoring task')
904  queue = options.batch_queue or '\'\"tomorrow\"\''
905  paramValues = ''
906  if options.params:
907  paramValues = '--params \''+options.params+'\''
908  testFlag = ''
909  if options.testonly:
910  testFlag = '--test'
911 
912  cmd = 'beamspotman --eospath=%s -p %s -s %s -f \'.*\\.%s\\..*\' -t %s --queue %s %s %s --montaskname %s runMon %i %s' % (eospath,ptag,stream,filter,bstag,queue,paramValues,testFlag,monTaskName,int(runnr),datatag)
913  print (cmd)
914  sys.stdout.flush()
915  status = os.system(cmd) >> 8 # Convert to standard Unix exit code
916  if status:
917  print ('\nERROR: Job submission returned error - exit code %i\n' % status)
918 
919  print()
920  sys.exit(0)
921 
922 
923 #
924 # Archive task to EOS
925 #
926 if cmd=='archive' and len(args)==3:
927  if not options.batch:
928  print ('\nWARNING: If you confirm below, each of the following datasets will:')
929  print (' - be archived to EOS')
930  if options.resultsondisk:
931  print (' - will be marked as RESULTSONDISK in the task database')
932  print (' - all except the results files *** WILL BE DELETED ***')
933  else:
934  print (' - will be marked as ARCHIVED in the task database')
935  print (' - all its files *** WILL BE DELETED ***')
936  print()
937 
938  with getTaskManager() as taskman:
939  try:
940  taskList = getFullTaskNames(taskman,args[1],args[2],confirmWithUser=not options.batch,addWildCards=not options.nowildcards)
941  except TaskManagerCheckError as e:
942  print (e)
943  sys.exit(1)
944 
945  tmpdir = '/tmp'
946  path = archivepath
947  onDiskCode = TaskManager.OnDiskCodes.get('ALLONDISK',None)
948  archivedCode = TaskManager.OnDiskCodes.get('RESULTSONDISK',None) if options.resultsondisk else TaskManager.OnDiskCodes.get('ARCHIVED',None)
949  exceptList = ['*dqflags.txt', '*.gif', '*.pdf', '*.config.py*', '*.argdict.gpickle', '*.AveBeamSpot.log', '*.PlotBeamSpotCompareReproc.log', '*.sh', '*.BeamSpotNt*', '*.BeamSpotGlobalNt.log', '*.status.*', '*.exit.*']
950 
951  for (dsname,taskname) in taskList:
952  t = taskman.getTaskDict(dsname,taskname)
953 
954  # Check that task files are still on disk, so we're not overwriting an earlier archive
955  if t['ONDISK'] != onDiskCode:
956  print ('Skipping task %s / %s status %s (task files must be on disk)' % (dsname,taskname,getKey(TaskManager.OnDiskCodes,t['ONDISK'])))
957  print()
958  continue
959 
960  dir = '%s/%s' % (dsname,taskname)
961  outname = dir.replace('/','-')+time.strftime('-%G_%m_%d.tar.gz')
962  print ('Archiving task %s / %s ...' % (dsname,taskname))
963  print (' --> %s/%s ...' % (path,outname))
964 
965  # Paranoia check against later catastrophic delete
966  if dir=='.' or dir=='*':
967  print ('\n**** FATAL ERROR: Very dangerous value of task directory found: %s - ABORTING' % dir)
968  sys.exit(1)
969 
970  # If expected directory exists, tar up files, write to EOS, mark as archived, and delete
971  if os.path.exists(dir):
972  status = os.system('tar czf %s/%s %s' % (tmpdir,outname,dir)) >> 8
973  if status:
974  sys.exit('\n**** ERROR: Unable to create local tar file %s/%s' % (tmpdir,outname))
975  status = os.system('xrdcp %s/%s root://eosatlas.cern.ch/%s/%s' % (tmpdir,outname,path,outname)) >> 8
976  if status:
977  os.system('rm -rf %s/%s' % (path,outname) )
978  status = os.system('xrdcp -f %s/%s root://eosatlas.cern.ch/%s/%s' % (tmpdir,outname,path,outname)) >> 8
979  if status:
980  sys.exit('\n**** ERROR: Unable to copy file to EOS to %s/%s' % (path,outname))
981 
982  os.system('rm %s/%s' % (tmpdir,outname))
983  n = taskman.setValue(dsname,taskname,'ONDISK',archivedCode)
984  if n!=1:
985  sys.exit('\n**** ERROR: Unexpected number of tasks modified: %i instead of 1 (DSNAME=%s,TASKNAME=%s)' % (n,dsname,taskname))
986 
987  if options.resultsondisk:
988  oscmd = r"find %s ! \( -name '%s' \) -type f -exec rm {} \;" % (dir, "' -or -name '".join(exceptList))
989  os.system(oscmd)
990  else:
991  os.system('rm -rf %s' % dir)
992  else:
993  print ('\n**** ERROR: No task directory',dir,'\n')
994 
995  print()
996  sys.exit(0)
997 
998 #
999 # Run beam spot resubmit failed jobs
1000 # Double check directory structure is appropriate for you
1001 #
1002 
1003 if cmd=='resubmit' and len(args) in [3,4]:
1004 
1005  dsname = args[1]
1006  taskname = args[2]
1007 
1008  # for backwards compatibility these are equivalent:
1009  # $0 resubmit DSNAME TASKNAME QUEUE
1010  # $0 -q QUEUE resubmit DSNAME TASKNAME
1011  queue = args[3] if len(args) == 4 else options.batch_queue
1012  if not queue:
1013  print ('ERROR: No queue was specified (use -q)')
1014  sys.exit(1)
1015 
1016  basepath = os.path.join(os.getcwd(), dsname, taskname)
1017  dircontents = os.listdir(basepath)
1018 
1019  condorScriptTemplate="""executable = %(scriptfile)s
1020 arguments = $(ClusterID) $(ProcId)
1021 output = %(logfile)s.out
1022 error = %(logfile)s.err
1023 log = %(logfile)s.log
1024 universe = vanilla
1025 +JobFlavour = %(batchqueue)s
1026 queue
1027 """
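 # Illustrative example (hypothetical job): with batchqueue='"tomorrow"' and
 # jobname 'MyDS-MyTask-lb0001' in jobdir '/prod/MyDS/MyTask/0001', the template
 # above renders to
 #   executable = /prod/MyDS/MyTask/0001/MyDS-MyTask-lb0001.sh
 #   arguments = $(ClusterID) $(ProcId)
 #   output = /prod/MyDS/MyTask/0001/MyDS-MyTask-lb0001.log.out
 #   error = /prod/MyDS/MyTask/0001/MyDS-MyTask-lb0001.log.err
 #   log = /prod/MyDS/MyTask/0001/MyDS-MyTask-lb0001.log.log
 #   universe = vanilla
 #   +JobFlavour = "tomorrow"
 #   queue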
1028 
1029 
1030  for dir in dircontents:
1031  if not os.path.isdir(os.path.join(basepath, dir)):
1032  continue
1033  print (dir)
1034  jobname = dir
1035  if (options.montaskname in taskname.split('.')) or options.legacy_mon:
1036  jobname = '-'.join([dsname, taskname, 'lb' + dir])
1037  fullpath = os.path.join(basepath, dir)
1038 
1039  isRunning = False
1040  isFailed = False
1041  for f in os.listdir(fullpath):
1042  if re.search('RUNNING', f):
1043  isRunning = True
1044  if re.search('COMPLETED',f) or re.search('POSTPROCESSING',f):
1045  with open(os.path.join(fullpath, jobname + '.exitstatus.dat')) as statusFile:
1046  status = statusFile.read(1)
1047  print (status)
1048  if status != "0":
1049  isFailed = True
1050  isRunning = False
1051  if options.resubRunning and isRunning:
1052  print ("Will resubmit running job")
1053  elif (isRunning or not isFailed) and not options.resubAll:
1054  continue
1055 
1056 
1057  for f in os.listdir(fullpath):
1058  if re.search('.exitstatus.', f):
1059  os.remove(os.path.join(fullpath, f))
1060  elif re.search('.exit.', f):
1061  os.remove(os.path.join(fullpath, f))
1062  elif re.search('.status.', f):
1063  os.remove(os.path.join(fullpath, f))
1064  elif re.search('.log', f):
1065  os.remove(os.path.join(fullpath, f))
1066  elif re.search('.py.final.py', f):
1067  os.remove(os.path.join(fullpath, f))
1068 
1069  jobConfig = {
1070  'test': 'this',
1071  'batchqueue' : queue,
1072  'jobname' : jobname,
1073  'jobdir' : fullpath,
1074  }
1075  jobConfig['logfile'] = '%(jobdir)s/%(jobname)s.log' % jobConfig
1076  jobConfig['scriptfile'] = '%(jobdir)s/%(jobname)s.sh' % jobConfig
1077 
1078  condorScript = condorScriptTemplate % jobConfig
1079  print (condorScript)
1080  script = open('condorSubmit.sub','w')
1081  script.write(condorScript)
1082  script.close()
1083  os.chmod('condorSubmit.sub',0o755)
1084  #batchCmd = 'bsub -L /bin/bash -q %(batchqueue)s -J %(jobname)s -o %(logfile)s %(scriptfile)s' % jobConfig
1085  batchCmd = 'condor_submit condorSubmit.sub'
1086 
1087  print (batchCmd)
1088  os.system(batchCmd)
1089 
1090  with getTaskManager() as taskman:
1091  print (taskman.getStatus(dsname, taskname))
1092  taskman.setStatus(dsname, taskname, TaskManager.StatusCodes['RUNNING'] )
1093 
1094  sys.exit(0)
1095 
1096 
1097 #
1098 # Run beam spot reprocessing jobs
1099 # Defines one task with several jobs where the splitting into groups of 5 LBs is done by hand
1100 #
1101 if cmd=='reproc' and len(args)==5:
1102  from InDetBeamSpotExample import HTCondorJobRunner
1103 
1104  jobopts = args[1]
1105  dsname = args[2]
1106  taskname = args[3]
1107  inputdata = args[4]
1108 
1109  lbperjob = options.lbperjob
1110  params = {'LumiRange': lbperjob}
1111  cmd = ' '.join(sys.argv)
1112  files = []
1113 
1114  # Additional job parameters
1115  for s in options.params.split(', '):
1116  if s:
1117  try:
1118  p = s.split('=',1)
1119  params[p[0].strip()] = eval(p[1].strip())
1120  except:
1121  print ('\nERROR parsing user parameter',p,'- parameter will be ignored')
1122 
1123  # TODO: Is this still needed?
1124  # if options.rucio:
1125  # # Has extra subdirectories: find unique set of these from filepaths, ignoring final part (i.e. filename).
1126  # # Will also work for single directory but more time consuming.
1127  # #inputdirs = set(['/'+'/'.join(f.split('/')[4:-1])+'/' for f in files])
1128  # inputdirs = set(['/'.join(f.replace(protocol, '').split('/')[:-1])+'/' for f in files])
1129 
1130  # Make list of all files (either from explicit list or by 'globbing' dir)
1131  lbMap = {}
1132  backend = DiskUtils.EOS() if options.eos else None
1133  fs = DiskUtils.FileSet.from_input(inputdata, backend=backend)
1134  fs = fs.matching(options.filter).use_files_from(options.filelist).only_latest(options.removedups)
1135  for f, lbs in fs.with_lumi_blocks(options.lbfilemap):
1136  lbMap[f] = lbs
1137  files.append(f)
1138  if not files: fail('No files were found.')
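 # Illustrative shape of the mapping built above (paths and LB numbers are
 # hypothetical): lbMap = {'/eos/.../AOD.12345._0001.1': [101, 102, 103], ...},
 # i.e. each input file is associated with the luminosity blocks it contains.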
1139 
1140  # Form bunched jobs
1141  jobFileDict = {}
1142  jobLBDict = {}
1143 
1144  # Special case to submit a single job over all files
1145  if lbperjob == -1:
1146  jobId = 1
1147  jobFileDict[jobId] = files
1148  jobLBDict[jobId] = []
1149 
1150  for f in files:
1151  try:
1152  lbs = sorted(lbMap[f.split('/')[-1]])
1153  except KeyError:
1154  sys.exit('No mapping for file %s' % f.split('/')[-1])
1155 
1156  jobLBDict[jobId].extend(lbs)
1157  else:
1158  for f in files:
1159  try:
1160  lbs = sorted(lbMap[f.split('/')[-1]])
1161  except KeyError:
1162  sys.exit('No mapping for file %s' % f.split('/')[-1])
1163 
1164  for lbnr in lbs:
1165  jobId = int((lbnr-1)/lbperjob)
1166 
1167  if not jobId in jobFileDict:
1168  jobFileDict[jobId] = [f]
1169  jobLBDict[jobId] = [lbnr]
1170  else:
1171  if not f in jobFileDict[jobId]:
1172  jobFileDict[jobId].append(f)
1173  jobLBDict[jobId].append(lbnr)
1174 
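 # Illustrative example: with lbperjob=5, jobId = int((lbnr-1)/lbperjob) maps
 # LBs 1-5 to job 0, LBs 6-10 to job 1, etc.; each job records every input file
 # containing at least one of its LBs.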
1175  # Submit bunched jobs
1176  with getTaskManager() as taskman:
1177  for i in sorted(jobFileDict.keys()):
1178  jobnr = i*lbperjob+1 # use first LB number as job number
1179  files=jobFileDict[i]
1180  lbs = sorted(set(jobLBDict[i]))
1181 
1182  #params['lbList'] = '[' + ','.join([str(l) for l in lbs]) + ']'
1183  intlbs = []
1184  for lbnr in lbs:
1185  intlbs.append(int(lbnr))
1186  params['lbList'] = intlbs
1187  jobname=dsname+'-'+taskname+'-lb%03i' % jobnr
1188 
1189  queue = options.batch_queue or '"tomorrow"'
1190  runner = HTCondorJobRunner.HTCondorJobRunner(
1191  jobnr=jobnr,
1192  jobdir=os.path.join(os.getcwd(), dsname, taskname, jobname),
1193  jobname=jobname,
1194  inputds='',
1195  inputfiles=files,
1196  joboptionpath=jobopts,
1197  filesperjob=len(files),
1198  batchqueue=queue,
1199  addinputtopoolcatalog=True,
1200  taskpostprocsteps='ReprocVertexDefaultProcessing',
1201  #outputfilelist=['dpd.root', 'nt.root', 'monitoring,root', 'beamspot.db'],
1202  autoconfparams='DetDescrVersion',
1203  returnstatuscode=True,
1204  comment=cmd,
1205  **params)
1206 
1207  #runner.showParams()
1208  try:
1209  runner.configure()
1210  except Exception as e:
1211  print ("ERROR: Unable to configure JobRunner job - perhaps same job was already configured / run before?")
1212  print ("DEBUG: Exception =",e)
1213  else:
1214  taskman.addTask(dsname, taskname, jobopts, runner.getParam('release'), runner.getNJobs(), runner.getParam('taskpostprocsteps'), comment=cmd)
1215  runner.run()
1216 
1217  sys.exit(0)
1218 
1219 
1220 #
1221 # Run task over AOD, bunching input files to jobs according to meta-data in input
1222 # AOD files.
1223 #
1224 
1225 if cmd=='runaod' and len(args)==5:
1226 
1227  from InDetBeamSpotExample import HTCondorJobRunner
1228  jobopts = args[1]
1229  dsname = args[2]
1230  taskname = args[3]
1231  inputdata = args[4]
1232 
1233  lbperjob = options.lbperjob
1234  params = {'LumiRange': lbperjob}
1235 
1236  # Always fit a single pLb for scans
1237  if options.pseudoLbFile:
1238  params = {'LumiRange': 1}
1239 
1240  cmd = ' '.join(sys.argv)
1241  files = []
1242 
1243  # Additional job parameters
1244  for s in options.params.split(', '):
1245  if s:
1246  try:
1247  p = s.split('=',1)
1248  params[p[0].strip()] = eval(p[1].strip())
1249  except:
1250  print ('\nERROR parsing user parameter',p,'- parameter will be ignored')
1251 
1252  lbMap = {}
1253  backend = DiskUtils.EOS() if options.eos else None
1254  fs = DiskUtils.FileSet.from_input(inputdata, backend=backend)
1255  print ("****************************************************")
1256  print ("*************** printing files *********************")
1257  print (fs)
1258  fs = fs.matching(options.filter)
1259  for f, lbs in fs.with_lumi_blocks(options.lbfilemap):
1260  print (f, lbs)
1261  lbMap[f] = lbs
1262  files.append(f)
1263  if not files: fail('No files were found.')
1264 
1265  # Form bunched jobs
1266  jobFileDict = {}
1267  jobLBDict = {}
1268  jobParams = {}
1269 
1270  if options.pseudoLbFile:
1271  # Extract start and end times of real LBs
1272  from InDetBeamSpotExample.COOLUtils import COOLQuery
1273  coolQuery = COOLQuery()
1274  from InDetBeamSpotExample.Utils import getRunFromName
1275  lbTimes = coolQuery.getLbTimes( getRunFromName(dsname, None, True) )
1276 
1277  # Loop over pseudo LBs
1278  with open(options.pseudoLbFile) as pLbFile:
1279  for line in pLbFile:
1280  if line[0] == '#': continue
1281 
1282  tokens = line.split()
1283  plbnr,tstart,tend = int(tokens[0]),int(tokens[1]),int(tokens[2])
1284  jobId = int(plbnr/lbperjob)
1285 
1286  # if we're passing in a file with acqFlags, and we're checking those flags,
1287  # then skip any points that don't have acqFlag=1.0
1288  if not options.noCheckAcqFlag and len(tokens)>=5 and abs(float(tokens[4])-1.)>0.001:
1289  print ("Point is not stationary -- skipping job %d" % jobId)
1290  continue
1291 
1292  # Find real LBs covering time period of pseudo LB. Assumes pLBs in nsec
1293  rlbs = [lb for (lb,time) in lbTimes.items() if (time[0] - tend/1e9)*(time[1] - tstart/1e9) < 0]
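 # Sanity check of the overlap test above: a real LB [t0,t1] (seconds) overlaps
 # the pseudo LB [tstart,tend] (nanoseconds) exactly when t0 < tend/1e9 and
 # t1 > tstart/1e9, which is precisely when (t0 - tend/1e9)*(t1 - tstart/1e9) < 0.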
1294 
1295  # Find files containing those real lbs
1296  filenames = []
1297  for f in files:
1298  try:
1299  lbs = sorted(lbMap[f])
1300  except KeyError:
1301  sys.exit('No mapping for file %s' % f.split('/')[-1])
1302 
1303  if not any(lb in rlbs for lb in lbs): continue
1304 
1305  filenames.append(f)
1306  try:
1307  jobLBDict[jobId].extend([lb for lb in rlbs if not lb in jobLBDict[jobId]])
1308  jobFileDict[jobId].extend([f for f in filenames if not f in jobFileDict[jobId]])
1309  jobParams[jobId]['lbData'].append(line.strip('\n').strip())
1310  except KeyError:
1311  jobLBDict[jobId] = rlbs
1312  jobFileDict[jobId] = filenames
1313  jobParams[jobId] = {'lbData' : [line.strip('\n').strip()]}
1314 
1315  else:
1316  for f in files:
1317  try:
1318  lbs = sorted(lbMap[f])
1319  except KeyError:
1320  print ('WARNING: No mapping for file %s. Skipping' % f.split('/')[-1])
1321  continue
1322  #sys.exit('No mapping for file %s' % f.split('/')[-1])
1323 
1324  for lbnr in lbs:
1325  jobId = int((lbnr-1)/lbperjob)
1326 
1327  if not jobId in jobFileDict:
1328  jobFileDict[jobId] = [f]
1329  jobLBDict[jobId] = [lbnr]
1330  else:
1331  if not f in jobFileDict[jobId]:
1332  jobFileDict[jobId].append(f)
1333  jobLBDict[jobId].append(lbnr)
1334 
1335  # Submit bunched jobs
1336  with getTaskManager() as taskman:
1337  for i in sorted(jobFileDict.keys()):
1338  jobnr = i*lbperjob+1 # use first LB number as job number
1339  files=jobFileDict[i]
1340  lbs = sorted(set(jobLBDict[i]))
1341 
1342  intlbs = []
1343  for lbnr in lbs:
1344  intlbs.append(int(lbnr))
1345  params['lbList'] = intlbs
1346 
1347  # Allow job-by-job parameters
1348  try:
1349  p = jobParams[i]
1350  for k,v in p.items(): params[k] = v
1351  except KeyError:
1352  pass
1353 
1354  jobname=dsname+'-'+taskname+'-lb%03i' % jobnr
1355 
1356  queue = options.batch_queue
1357  if queue is None:
1358  # run on a different queue for VdM scans to avoid clogging up the normal queue
1359  queue='"tomorrow"' if options.pseudoLbFile else '"tomorrow"'
1360  runner = HTCondorJobRunner.HTCondorJobRunner(
1361  jobnr=jobnr,
1362  jobdir=os.path.join(os.getcwd(), dsname, taskname, jobname),
1363  jobname=jobname,
1364  inputds='',
1365  inputfiles=files,
1366  joboptionpath=jobopts,
1367  filesperjob=len(files),
1368  batchqueue=queue,
1369  addinputtopoolcatalog=True,
1370  taskpostprocsteps=' '.join(options.postprocsteps),
1371  autoconfparams='DetDescrVersion',
1372  returnstatuscode=True,
1373  comment=cmd,
1374  **params)
1375  if options.testonly:
1376  runner.showParams()
1377  else:
1378  try:
1379  runner.configure()
1380  except Exception as e:
1381  print ("ERROR: Unable to configure JobRunner job - perhaps same job was already configured / run before?")
1382  print ("DEBUG: Exception =",e)
1383  else:
1384  taskman.addTask(dsname, taskname, jobopts, runner.getParam('release'), runner.getNJobs(), runner.getParam('taskpostprocsteps'), comment=cmd)
1385  runner.run()
1386 
1387  sys.exit(0)
1388 
1389 
1390 
1391 #
1392 # Upload any DQ sqlite file to COOL (independent of task, w/o book keeping)
1393 #
1394 if cmd=='dqflag' and len(args)==2:
1395  dbfile = args[1]
1396  if not options.dqtag:
1397  sys.exit('ERROR: No beamspot DQ tag specified')
1398  try:
1399  with open(proddqcoolpasswdfile, 'r') as passwdfile:
1400  passwd = passwdfile.read().strip()
1401  except:
1402  sys.exit('ERROR: Unable to determine DQ COOL upload password')
1403 
1404  print ('\nBeam spot DQ file: ',dbfile)
1405  print ('Uploading to tag: ',options.dqtag)
1406 
1407  if options.ignoremode:
1408  ignoremode = '--ignoremode %s' % options.ignoremode
1409  else:
1410  ignoremode = ''
1411 
1412  if options.batch:
1413  batchmode = '--batch'
1414  else:
1415  batchmode = ''
1416 
1417  print()
1418  cmd = 'dq_defect_copy_defect_database.py --intag %s --outtag %s "sqlite://;schema=%s;dbname=%s" "oracle://ATLAS_COOLWRITE;schema=ATLAS_COOLOFL_GLOBAL;dbname=%s;"' %(options.srcdqtag, options.dqtag, dbfile, options.srcdqdbname, options.destdqdbname)
1419  print (cmd)
1420 
1421  stat = os.system(cmd)
1422 
1423  # Old DQ flags
1424  #stat = os.system('/afs/cern.ch/user/a/atlcond/utils22/AtlCoolMerge.py --nomail %s %s --folder /GLOBAL/DETSTATUS/SHIFTOFL --tag %s --retag %s --destdb %s %s %s ATLAS_COOLWRITE ATLAS_COOLOFL_GLOBAL_W %s' % (batchmode,ignoremode,options.srcdqtag,options.dqtag,options.destdqdbname,dbfile,options.srcdqdbname,passwd))
1425 
1426  if stat:
1427  print ("\n\nERROR: UPLOADING DQ FLAG TO COOL FAILED - PLEASE CHECK CAREFULLY!\n\n")
1428  sys.exit(1)
1429  sys.exit(0)
1430 
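#
# The local SQLite file is addressed through a COOL connection string of the
# form "sqlite://;schema=<FILE>;dbname=<DBNAME>". A hypothetical invocation of
# this command (tag names invented for the example):
#
#   beamspotman.py --srcdqtag nominal --dqtag HEAD dqflag mydq.db
#
# which runs dq_defect_copy_defect_database.py with the SQLite file as source
# database and the ATLAS_COOLOFL_GLOBAL oracle schema as destination.
#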
1431 #
1432 # Upload beam spot DQ flag for given task to COOL
1433 #
1434 if cmd=='dqflag' and len(args)==3:
1435  try:
1436  with getTaskManager() as taskman:
1437  [(dsname,task)] = getFullTaskNames(taskman,args[1],args[2],requireSingleTask=True,confirmWithUser=not options.batch,addWildCards=not options.nowildcards)
1438  except TaskManagerCheckError as e:
1439  print (e)
1440  sys.exit(1)
1441 
1442  if not options.dqtag:
1443  sys.exit('ERROR: No beam spot DQ tag specified')
1444 
1445  dbfile = glob.glob('%s/%s/*-dqflags.db' % (dsname,task))
1446  if len(dbfile)!=1:
1447  print ('ERROR: Missing or ambiguous input COOL DQ file:',dbfile)
 1448  sys.exit(1)
1449 
1450  try:
1451  with open(proddqcoolpasswdfile, 'r') as passwdfile:
1452  passwd = passwdfile.read().strip()
 1453  except Exception:
1454  sys.exit('ERROR: Unable to determine DQ COOL upload password')
1455 
1456  print ('\nData set: ',dsname)
1457  print ('Beam spot DQ file: ',dbfile[0])
1458  print ('Uploading to tag: ',options.dqtag)
1459 
1460  if options.ignoremode:
1461  ignoremode = '--ignoremode %s' % options.ignoremode
1462  else:
1463  ignoremode = ''
1464  if options.batch:
1465  batchmode = '--batch'
1466  else:
1467  batchmode = ''
1468 
1469  cmd = 'dq_defect_copy_defect_database.py --intag %s --outtag %s "sqlite://;schema=%s;dbname=%s" "oracle://ATLAS_COOLWRITE;schema=ATLAS_COOLOFL_GLOBAL;dbname=%s;"' %(options.srcdqtag, options.dqtag, dbfile[0], options.srcdqdbname, options.destdqdbname)
1470  print ("Running %s" % cmd)
1471  stat = os.system(cmd)
1472 
1473  # Old DQ flags
1474  #stat = os.system('/afs/cern.ch/user/a/atlcond/utils22/AtlCoolMerge.py --nomail %s %s --folder /GLOBAL/DETSTATUS/SHIFTOFL --tag %s --retag %s --destdb %s %s %s ATLAS_COOLWRITE ATLAS_COOLOFL_GLOBAL_W %s' % (batchmode,ignoremode,options.srcdqtag,options.dqtag,options.destdqdbname,dbfile[0],options.srcdqdbname,passwd))
1475  if stat:
1476  print ("\n\nERROR: UPLOADING DQ FLAG TO COOL FAILED - PLEASE CHECK CAREFULLY!\n\n")
1477  sys.exit(1)
1478 
1479  # Record upload information in upload flag file
1480  uploadflag = dbfile[0]+'.uploaded'
1481  stat = os.system('echo "`date` %s" >> %s' % (options.dqtag,uploadflag) )
1482  if stat:
1483  print ("ERROR: Uploading DQ flag was successful, but unable to set upload flag", uploadflag)
1484 
1485  sys.exit(0)
1486 
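#
# The ".uploaded" flag written above is a plain text file that accumulates one
# timestamped line per upload. A pure-Python sketch equivalent to the shell
# echo above, assuming the same uploadflag path and dqtag option:
#
#   import time
#   with open(uploadflag, 'a') as flagfile:
#       flagfile.write('%s %s\n' % (time.asctime(), options.dqtag))
#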
1487 #
1488 # Run beam spot BCID job
1489 #
1490 if cmd=='runBCID' and len(args)==3:
1491  run = args[1]
1492  tag = args[2]
1493 
1494  dataset, dsname = dataset_from_run_and_tag(run, tag)
1495 
 1496  # NOTE: The command below may be executed via a cron job, so we need to set STAGE_SVCCLASS
1497  # explicitly in all cases, since it may not be inherited from the environment.
1498  # NOTE: We pass along the filter setting, but currently we can do --lbperjob only for ESD since for
1499  # other data sets we have only the merged files.
1500  run_jobs(options.bcidjoboptions, dsname, options.bcidtaskname,
1501  {
1502  'cmdjobpreprocessing' : 'export STAGE_SVCCLASS=atlcal; export ATLAS_LOCAL_ROOT_BASE=/cvmfs/atlas.cern.ch/repo/ATLASLocalRootBase',
1503  'SeparateByBCID' : True,
1504  'VertexNtuple' : False,
1505  },
1506  '--files-per-job', 0,
1507  '--match', options.filter,
1508  '--exclude', r'.*\.TMP\.log.*',
 1509  '--postprocsteps', 'BCIDDefaultProcessing')
1510 
1511  sys.exit(0)
1512 
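#
# A hypothetical runBCID invocation (run number and data tag invented for the
# example):
#
#   beamspotman.py -f '.*\.AOD\..*' runBCID 358031 f956_m2019
#
# The cmdjobpreprocessing export makes the jobs independent of the caller's
# environment (a cron shell has no STAGE_SVCCLASS), and SeparateByBCID=True
# requests a separate beam spot determination per bunch-crossing ID.
#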
1513 #
1514 # Run BCID jobs for jobs completed at most MAXTIME ago
1515 #
1516 if cmd=='runBCIDJobs' and len(args)<3:
1517  if len(args)==2:
1518  earliestUpdateTime = time.time()-float(args[1])
1519  else:
1520  earliestUpdateTime = 0
1521 
1522  if not options.beamspottag:
1523  sys.exit('ERROR: No beam spot tag specified')
1524 
1525  print ('Running the following BCID tasks for tasks of type %s:\n' % options.runtaskname)
1526  print (' %-10s %s' % ('RUN','BCID TASK'))
1527  print (' %s' % (60*'-'))
1528 
1529  taskList = []
1530  with getTaskManager() as taskman:
1531  for t in taskman.taskIterDict('*',["where TASKNAME like '%s%%'" % options.runtaskname,
1532  "and UPDATED >= ", DbParam(earliestUpdateTime),
1533  "and COOLTAGS like '%%%s%%'" % options.beamspottag,
1534  "order by RUNNR desc"]):
1535  dsname = t['DSNAME']
1536  runnr = t['RUNNR']
1537  taskName = t['TASKNAME']
1538  datatag = taskName.split('.')[-1].split('_')[0]
1539  bcidTaskName = 'BCID.%s.%s' % (taskName,datatag)
1540 
1541  try:
1542  m = next(taskman.taskIterDict('*',['where RUNNR =',DbParam(runnr),'and DSNAME =',DbParam(dsname),'and TASKNAME =',DbParam(bcidTaskName),'order by UPDATED desc']))
1543  print (' %-10s %s'% (runnr,bcidTaskName))
 1544  except StopIteration:
1545  print (' * %-10s %s'% (runnr,'--- no BCID task found ---'))
1546  taskList.append(t)
1547  pass
1548 
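#
# In the query above, literal SQL fragments alternate with DbParam-wrapped
# values in the list handed to taskIterDict; DbParam wraps the values that are
# bound as query parameters rather than pasted into the SQL text. A minimal
# sketch of the same pattern, assuming an open taskman as above and a
# hypothetical cut-off time:
#
#   cutoff = time.time() - 24*3600   # hypothetical: updated within one day
#   for t in taskman.taskIterDict('*', ['where UPDATED >=', DbParam(cutoff),
#                                       'order by RUNNR desc']):
#       print (t['RUNNR'], t['TASKNAME'])
#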
1549  if not taskList:
1550  print ('\nNo jobs need to be run.\n')
1551  sys.exit(0)
1552 
1553  if not options.batch:
1554  a = input('\nARE YOU SURE [n] ? ')
1555  if a!='y':
1556  sys.exit('ERROR: Running of BCID tasks aborted by user')
1557  print()
1558 
1559  oracle = getT0DbConnection()
1560  for t in taskList:
1561  dsname = t['DSNAME']
1562  runnr = t['RUNNR']
1563  ptag = dsname.split('.')[0]
1564  stream = dsname.split('.')[2]
1565  taskName = t['TASKNAME']
 1566  fulldatatag = taskName.split('.')[-1]
 1567  datatag = fulldatatag.split('_')[0]
1568  bcidTaskName = 'BCID.%s.%s' % (taskName,datatag)
1569 
1570  filter = 'AOD'
 1571  if any(tok.startswith('m') for tok in fulldatatag.split('_')):
1572  t0dsname = '%s.merge.%s.%s%%' % (dsname, filter, datatag)
1573  else:
1574  t0dsname = '%s.recon.%s.%s%%' % (dsname, filter, datatag)
1575 
1576  c = getJobConfig('.',dsname,taskName)
1577  if 'ESD' in c['inputfiles'][0]:
1578  filter = 'ESD'
1579  t0dsname = '%s.recon.ESD.%s' % (dsname, datatag)
1580 
1581  print ('\nRunning BCID job for run %s:' % runnr)
1582 
1583  print ('... Querying T0 database for replication of %s' % t0dsname)
1584  cur = oracle.cursor()
1585  cur.execute("select DATASETNAME,PSTATES from DATASET where DATASETNAME like '%s' and PSTATES like '%%replicate:done%%'" % t0dsname)
1586  r = cur.fetchall()
1587  if not r:
1588  print (' WARNING: input data not yet replicated - please retry later')
1589  else:
1590  print (' ',r)
1591  print ('... Submitting BCID task')
1592  cmd = 'beamspotman.py -p %s -s %s -f \'.*\\.%s\\..*\' --bcidtaskname %s runBCID %i %s' % (ptag,stream,filter,'BCID.'+taskName,int(runnr),datatag)
1593  print (' %s' % cmd)
1594  sys.stdout.flush()
1595  status = os.system(cmd) >> 8 # Convert to standard Unix exit code
1596  if status:
 1597  print ('\nERROR: Job submission returned error - exit code %i\n' % status)
1598 
1599  print()
1600  sys.exit(0)
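#
# How the data tag is derived from a task name above, for a hypothetical task
# name: the last dot-separated field is the full processing tag, and its first
# underscore-separated token is the reconstruction tag used in the Tier-0
# dataset pattern.
#
#   taskName = 'DB_BEAMSPOT.x642_m2019'      # hypothetical
#   fulldatatag = taskName.split('.')[-1]    # 'x642_m2019'
#   datatag = fulldatatag.split('_')[0]      # 'x642'
#   merged = any(tok.startswith('m') for tok in fulldatatag.split('_'))
#   # merged -> True, so the '%s.merge.%s.%s%%' dataset pattern is used
#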
1601 #
 1602 # Create an SQLite file containing an MC (OFLP200) tag, a la beamSpot_set.py.
 1603 # Need -- before positional arguments to deal with negative values, e.g.:
1604 # beamspotman.py -t IndetBeampos-7TeV-PeriodD-SmallWidth-001 mctag -- 0 0.1 1.1 -5 0.045 0.048 63 0.00043 -4e-05 9e-05
1605 #
 1606 if cmd=='mctag' and 8 <= len(args) < 12:
1607 
1608  from InDetBeamSpotExample.COOLUtils import *
1609 
1610  if not options.beamspottag:
1611  sys.exit('ERROR: No beam spot tag specified')
1612 
1613  dbfile=options.beamspottag + '.db'
1614  folderHandle = openBeamSpotDbFile(dbfile, dbName = 'OFLP200', forceNew = True)
1615 
1616  runMin = options.runMin if options.runMin is not None else 0
1617  runMax = options.runMax if options.runMax is not None else (1 << 31)-1
1618 
1619  writeBeamSpotEntry(folderHandle, tag=options.beamspottag,
1620  runMin=runMin, runMax=runMax,
1621  status=int(args[1]),
1622  posX=float(args[2]), posY=float(args[3]), posZ=float(args[4]),
1623  sigmaX=float(args[5]), sigmaY=float(args[6]), sigmaZ=float(args[7]),
1624  tiltX=float(args[8]) if len(args)>8 else 0.,
1625  tiltY=float(args[9]) if len(args)>9 else 0.,
1626  sigmaXY=float(args[10]) if len(args)>10 else 0.,
1627  posXErr=0., posYErr=0., posZErr=0.,
1628  sigmaXErr=0., sigmaYErr=0., sigmaZErr=0.,
1629  tiltXErr=0., tiltYErr=0.,
1630  sigmaXYErr=0.)
1631 
1632  print ('* MC beamspot tag written to db=OFLP200, tag=%s in %s ' %(options.beamspottag, dbfile))
1633  print (' - AtlCoolConsole.py "sqlite://;schema=' + dbfile + ';dbname=OFLP200"')
1634  print ('* To upload to oracle use:')
1635  print (' - beamspotman.py --srctag %s -t %s --srcdbname OFLP200 --destdbname OFLP200 upload %s' %(options.beamspottag, options.beamspottag, dbfile))
1636  sys.exit(0)
1637 
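#
# Mapping of the mctag positional arguments onto writeBeamSpotEntry, using the
# example command in the comment above:
#
#   args[1]      -> status                  (0)
#   args[2..4]   -> posX, posY, posZ        (0.1, 1.1, -5)
#   args[5..7]   -> sigmaX, sigmaY, sigmaZ  (0.045, 0.048, 63)
#   args[8..10]  -> tiltX, tiltY, sigmaXY   (0.00043, -4e-05, 9e-05; optional, default 0.)
#
# All error parameters are written as zero, matching beamSpot_set.py.
#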
1638 #
 1639 # Create an SQLite file containing a data (CONDBR2) tag, a la beamSpot_set.py.
 1640 # Need -- before positional arguments to deal with negative values, e.g.:
1641 # beamspotman.py -t IndetBeampos-7TeV-PeriodD-SmallWidth-001 maketag -- 0 0.1 1.1 -5 0.045 0.048 63 0.00043 -4e-05 9e-05
1642 #
 1643 if cmd=='maketag' and 8 <= len(args) < 12:
1644 
1645  from InDetBeamSpotExample.COOLUtils import *
1646 
1647  if not options.beamspottag:
1648  sys.exit('ERROR: No beam spot tag specified')
1649 
1650  dbfile=options.beamspottag + '.db'
1651  dbName=options.destdbname
1652  folderHandle = openBeamSpotDbFile(dbfile, dbName = options.destdbname, forceNew = True)
1653 
1654  runMin = options.runMin if options.runMin is not None else 0
1655  runMax = options.runMax if options.runMax is not None else (1 << 31)-1
1656 
1657  writeBeamSpotEntry(folderHandle, tag=options.beamspottag,
1658  runMin=runMin, runMax=runMax,
1659  status=int(args[1]),
1660  posX=float(args[2]), posY=float(args[3]), posZ=float(args[4]),
1661  sigmaX=float(args[5]), sigmaY=float(args[6]), sigmaZ=float(args[7]),
1662  tiltX=float(args[8]) if len(args)>8 else 0.,
1663  tiltY=float(args[9]) if len(args)>9 else 0.,
1664  sigmaXY=float(args[10]) if len(args)>10 else 0.,
1665  posXErr=0., posYErr=0., posZErr=0.,
1666  sigmaXErr=0., sigmaYErr=0., sigmaZErr=0.,
1667  tiltXErr=0., tiltYErr=0.,
1668  sigmaXYErr=0.)
1669 
1670  print ('* Beamspot tag written to db=%s, tag=%s in %s ' %(dbName,options.beamspottag, dbfile))
1671  print (' - AtlCoolConsole.py "sqlite://;schema=' + dbfile + ';dbname=%s"' %(dbName))
1672  print ('* To upload to oracle use:')
1673  print (' - beamspotman.py --srctag %s -t %s --srcdbname %s --destdbname %s upload %s' %(options.beamspottag, options.beamspottag, dbName, dbName, dbfile))
1674  print (' - /afs/cern.ch/user/a/atlcond/utils22/AtlCoolMerge.py --nomail %s %s ATLAS_COOLWRITE ATLAS_COOLOFL_INDET_W <passwd>' %(dbfile, dbName))
1675  sys.exit(0)
1676 
1677 
1678 print ('ERROR: Illegal command or number of arguments ({})'.format(' '.join(args)))
1679 sys.exit(1)