beamspotman.py
1 #!/usr/bin/env python
2 
3 # Copyright (C) 2002-2023 CERN for the benefit of the ATLAS collaboration
4 
5 from __future__ import print_function
6 
7 """
8 beamspotman is a command line utility to do typical beam spot related tasks.
9 """
10 __authors__ = ['Juerg Beringer', 'Carl Suster']
11 __version__ = 'beamspotman.py atlas/athena'
12 __usage__ = '''%prog [options] command [args ...]
13 
14 Commands are:
15 
16 show RUNNR For given run, show data on CAF and beam spot jobs
17 run RUNNR TAG Run standard beam spot reconstruction job
18 runCmd DSNAME TASKNAME CMD Repeat command for matching tasks (use %(DSNAME)s and
19  %(TASKNAME)s in the command string CMD to specify the
20  dataset and task names, if needed)
21 runMon RUNNR TAG Run standard beam spot monitoring job
22 runMonJobs [MAXTIME] Run monitoring jobs for jobs completed at most MAXTIME ago
23 postproc Default postprocessing for any task that needs it
24 postproc DSNAME TASKNAME [WHATLIST] Postprocessing for a selected set of tasks
25 upload DBFILE Upload SQLite file into COOL (independent of task)
26 upload DSNAME TASKNAME Upload result of beam spot determination into COOL
27 dq2get DSNAME TASKNAME Retrieve task data from grid job (must have set up grid env.)
28 queryT0 DSNAME TASKNAME Query Tier-0 database about a task
29 backup DIR Backup directory DIR (may contain wildcards) to EOS
30 archive DSNAME TASKNAME Archive task data (deletes files after copying to EOS,
31  sets on-disk status to ARCHIVED)
32 lsbackup List contents of backup directory on EOS
33 reproc TEMPLATE DSNAME TASKNAME INPUTDIR Run reprocessing task consisting of several jobs split into 5 LBs
34  over the files in INPUTDIR
35 runaod TEMPLATE DSNAME TASKNAME INPUTDIR Run over AOD, splitting jobs into sets of N LBs (similar
36  to reproc command, except for variable params)
37 resubmit DSNAME TASKNAME Rerun jobs of a specific task (choose queue with -q QUEUE)
38 dqflag DBFILE Upload DQ SQLite file into COOL (independent of task)
39 dqflag DSNAME TASKNAME Upload result of beam spot DQ flag determination into COOL
40 runBCID RUNNR TAG Run standard beam spot BCID job
41 runBCIDJobs [MAXTIME] Run BCID jobs for jobs completed at most MAXTIME ago
42 mctag STATUS POSX POSY POSZ Create an sqlite file containing a MC tag with the
43  SIGMAX SIGMAY SIGMAZ parameters given (and zero errors). A la beamSpot_set.py.
44  [TILTX TILTY SIGMAXY]
45 
46 
47 '''
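# Illustrative invocations (the run number, tag, and task names below are
# hypothetical, chosen only to make the argument layout concrete):
#
#   beamspotman.py -t BEAMSPOTTAG run 358096 f961
#   beamspotman.py -t BEAMSPOTTAG runMon 358096 f961
#   beamspotman.py postproc data17_13TeV.00358096.calibration_BeamSpot VTX-LR5.f961 PlotBeamSpot
#   beamspotman.py upload mybeamspot.db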
48 
49 proddir = '/afs/cern.ch/user/a/atlidbs/jobs'
50 produserfile = '/afs/cern.ch/user/a/atlidbs/private/produsers.dat'
51 prodcoolpasswdfile = '/afs/cern.ch/user/a/atlidbs/private/coolinfo.dat'
52 flaskcoolpasswdfile = '/afs/cern.ch/user/a/atlidbs/private/flaskinfo.dat'
53 proddqcoolpasswdfile = '/afs/cern.ch/user/a/atlidbs/private/cooldqinfo.dat'
54 tier0dbinfofile = '/afs/cern.ch/user/a/atlidbs/private/t0dbinfo.dat'
55 beamspottag = ''
56 backuppath = '/eos/atlas/atlascerngroupdisk/phys-beamspot/jobs/backup'
57 archivepath = '/eos/atlas/atlascerngroupdisk/phys-beamspot/jobs/archive'
58 
59 import sys, os, stat, glob
60 import re
61 import subprocess
62 import time
63 from copy import copy
64 
65 from InDetBeamSpotExample.TaskManager import * # TaskManager, getFullTaskNames, TaskManagerCheckError, DbParam, appendUnique, getKey, getJobConfig
66 from InDetBeamSpotExample.PostProcessing import doPostProcessing
67 from InDetBeamSpotExample import BeamSpotPostProcessing
68 from InDetBeamSpotExample import COOLUtils
69 from InDetBeamSpotExample import DiskUtils
70 
71 from future import standard_library
72 standard_library.install_aliases()
74 
75 from optparse import Option, OptionParser, OptionGroup
76 def check_commsep(option, opt, value):
77  return re.split(r'\s*,\s*|\s+', value)
78 class BeamSpotOption(Option):
79  TYPES = Option.TYPES + ('commsep',)
80  TYPE_CHECKER = copy(Option.TYPE_CHECKER)
81  TYPE_CHECKER['commsep'] = check_commsep
82 
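# The custom 'commsep' option type above accepts commas and/or whitespace as
# separators; e.g. (hypothetical values) both
#   --postprocsteps 'PlotBeamSpot,LinkResults'
#   --postprocsteps 'PlotBeamSpot LinkResults'
# parse to ['PlotBeamSpot', 'LinkResults'] via check_commsep().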
83 parser = OptionParser(usage=__usage__, version=__version__, option_class=BeamSpotOption)
84 
85 g_input = OptionGroup(parser, 'Input file options')
86 g_input.add_option('', '--eos', dest='eos', default=False, action='store_true', help='access files over EOS (not needed if /eos is mounted)')
87 g_input.add_option('-e', '--eospath', dest='eospath', default='/eos/atlas/atlastier0/rucio', help='eos path (excluding project and stream name)')
88 g_input.add_option('-p', '--project', dest='project', default='data17_13TeV', help='project name')
89 g_input.add_option('-s', '--stream', dest='stream', default='calibration_BeamSpot', help='stream name')
90 g_input.add_option('-f', '--filter', dest='filter', default=r'.*\.AOD\..*', help='regular expression to filter input files')
91 g_input.add_option('', '--lbfilemap', dest='lbfilemap', default='', help='text file with mapping between filename and lumi blocks')
92 g_input.add_option('', '--rucio', dest='rucio', action='store_true', default=False, help='rucio directory structure')
93 g_input.add_option('', '--dpdinput', dest='dpdinput', action='store_true', default=False, help='Run over DPD for runaod')
94 g_input.add_option('-l', '--filelist', dest='filelist', default=None, help='Explicit list of files for reproc command')
95 g_input.add_option('', '--removedups', dest='removedups', action='store_true', default=False, help='Remove duplicate retry files of form root.N, keeping the latest')
96 parser.add_option_group(g_input)
97 
98 g_mode = OptionGroup(parser, 'Mode flags')
99 g_mode.add_option('-b', '--batch', dest='batch', action='store_true', default=False, help='batch mode - never ask for confirmation')
100 g_mode.add_option('', '--test', dest='testonly', action='store_true', default=False, help='for runaod, show only options and input files')
101 g_mode.add_option('', '--expertmode', dest='expertmode', action='store_true', default=False, help='expert mode (BE VERY CAREFUL)')
102 g_mode.add_option('', '--ignoremode', dest='ignoremode', default='', help='ignore update protection mode (needs expert pwd)')
103 parser.add_option_group(g_mode)
104 
105 # Other options:
106 parser.add_option('-t', '--beamspottag', dest='beamspottag', default=beamspottag, help='beam spot tag')
107 parser.add_option('-d', '--dbconn', dest='dbconn', default='', help='task manager database connection string (default: check TASKDB, otherwise use sqlite_file:taskdata.db)')
108 parser.add_option('', '--proddir', dest='proddir', default=proddir, help='production directory (default: %s)' % proddir)
109 parser.add_option('', '--destdbname', dest='destdbname', default='CONDBR2', help='destination database instance name (default: CONDBR2)')
110 parser.add_option('', '--srcdbname', dest='srcdbname', default='BEAMSPOT', help='source database instance name (default: BEAMSPOT)')
111 parser.add_option('', '--srctag', dest='srctag', default='nominal', help='source tag (default: nominal)')
112 parser.add_option('-r', '--runjoboptions', dest='runjoboptions', default='InDetBeamSpotExample/VertexTemplate.py', help='template to run beam spot jobs')
113 parser.add_option('', '--runtaskname', dest='runtaskname', default='VTX', help='task name')
114 parser.add_option('-m', '--monjoboptions', dest='monjoboptions', default='runBeamSpotMonitor.py', help='template to run monitoring jobs')
115 parser.add_option('', '--bcidjoboptions', dest='bcidjoboptions', default='InDetBeamSpotExample/VertexTemplate.py', help='template to run BCID jobs')
116 parser.add_option('', '--montaskname', dest='montaskname', default='MON', help='task name')
117 parser.add_option('', '--bcidtaskname', dest='bcidtaskname', default='BCID', help='BCID task name')
118 parser.add_option('-g', '--griduser', dest='griduser', default='user10.JuergBeringer', help='grid user name prefix (e.g. user10.JuergBeringer)')
119 parser.add_option('-n', '--nowildcards', dest='nowildcards', action='store_true', default=False, help='do not add wildcards when looking up dataset and task names')
120 parser.add_option('-x', '--excludeiftask', dest='excludeiftask', default='', help='Exclude running cmd with runCmd if such task exists already')
121 parser.add_option('', '--excludeds', dest='excludeds', default='', help='Exclude running cmd with runCmd for dataset containing a certain string')
122 parser.add_option('', '--archive', dest='archive', action='store_true', default=False, help='archive, ie delete data after backup')
123 parser.add_option('', '--incremental', dest='incremental', action='store_true', default=False, help='incremental backup')
124 parser.add_option('', '--lbperjob', dest='lbperjob', type='int', default=5, help='number of luminosity blocks per job (default: 5)')
125 parser.add_option('', '--params', dest='params', default='', help='job option parameters to pass to job option template')
126 parser.add_option('-z', '--postprocsteps', dest='postprocsteps', type='commsep', default=['JobPostProcessing'], help='Task-level postprocessing steps, comma-separated (Default: JobPostProcessing)')
127 parser.add_option('', '--srcdqtag', dest='srcdqtag', default='nominal', help='source DQ tag (default: nominal)')
128 parser.add_option('', '--dqtag', dest='dqtag', default='HEAD', help='beam spot DQ tag')
129 parser.add_option('', '--destdqdbname', dest='destdqdbname', default='CONDBR2', help='DQ destination database instance name (default: CONDBR2)')
130 parser.add_option('', '--srcdqdbname', dest='srcdqdbname', default='IDBSDQ', help='DQ source database instance name (default: IDBSDQ)')
131 parser.add_option('', '--dslist', dest='dslist', default='', help='Exclude running cmd with runCmd if the dataset is not in this file')
132 parser.add_option('', '--nominbias', dest='nominbias', action='store_true', default=False, help='overwrite physics_MinBias in DSNAME with express_express')
133 parser.add_option('', '--resultsondisk', dest='resultsondisk', action='store_true', default=False, help='Leave the results on disk when archiving')
134 parser.add_option('', '--pLbFile', dest='pseudoLbFile', default=None, help='File for pseudo LB info from scan')
135 parser.add_option('', '--prefix', dest='prefix', default='', help='Prefix for reading files from mass storage (default: determined from the file name)')
136 parser.add_option('', '--rl', dest='runMin', type='int', default=None, help='Minimum run number for mctag (inclusive)')
137 parser.add_option('', '--ru', dest='runMax', type='int', default=None, help='Maximum run number for mctag (inclusive)')
138 parser.add_option('', '--useRun', dest='useRun', type='int', default=None, help='Run monitoring job for a given run only')
139 parser.add_option('', '--noCheckAcqFlag', dest='noCheckAcqFlag', action='store_true', default=False, help='Don\'t check acqFlag when submitting VdM jobs')
140 parser.add_option('', '--resubAll', dest='resubAll', action='store_true', default=False, help='Resubmit all jobs irrespective of status')
141 parser.add_option('', '--resubRunning', dest='resubRunning', action='store_true', default=False, help='Resubmit all "running" jobs')
142 parser.add_option('-q', '--queue', dest='batch_queue', default=None, help='Name of batch queue to use (default is context-specific)')
143 
144 g_deprecated = OptionGroup(parser, 'Deprecated Options')
145 g_deprecated.add_option('', '--mon', dest='legacy_mon', action='store_true', default=False, help='mon directory structure (now inferred from montaskname)')
146 parser.add_option_group(g_deprecated)
147 
148 (options, args) = parser.parse_args()
149 if len(args) < 1: parser.error('wrong number of command line arguments')
150 cmd = args[0]
151 cmdargs = args[1:]
152 
153 # General error checking (skipped in expert mode to allow testing)
154 if not options.expertmode:
155  if os.path.realpath(os.getcwd()) != os.path.realpath(options.proddir):
156  sys.exit('ERROR: You must run this command in the production directory %s' % options.proddir)
157  if not os.path.exists(produserfile):
158  sys.exit('ERROR: Authorization file %s is unreadable or does not exist' % produserfile)
159  if not subprocess.getoutput('grep `whoami` %s' % produserfile):
160  sys.exit('ERROR: You are not authorized to run this command (user name must be listed in produser file %s)' % produserfile)
161 else:
162  if os.path.realpath(os.getcwd()) != os.path.realpath(options.proddir):
163  print ('WARNING: You are not running in the production directory %s' % options.proddir)
164 
165 
166 #
167 # Utilities
168 #
169 def getT0DbConnection():
170  try:
171  with open(tier0dbinfofile, 'r') as dbinfofile:
172  connstring = dbinfofile.read().strip()
173  except:
174  sys.exit('ERROR: Unable to read connection information for Tier-0 database')
175  dbtype, dbname = connstring.split(':',1)
176  if dbtype!='oracle':
177  sys.exit('ERROR: Invalid T0 connection string')
178  import cx_Oracle
179  try:
180  oracle = cx_Oracle.connect(dbname)
181  except:
182  print ('ERROR: First connection attempt to Tier-0 Oracle database failed; will retry in 10s ...')
183  try:
184  time.sleep(10)
185  oracle = cx_Oracle.connect(dbname)
186  except Exception as e:
187  print (e)
188  sys.exit('ERROR: Unable to connect to Tier-0 Oracle database')
189  if not oracle:
190  sys.exit('ERROR: Unable to connect to Tier-0 Oracle database (invalid cx_Oracle connection)')
191  return oracle
192 
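# Sketch of how the connection above is used elsewhere in this script
# (cx_Oracle cursor API; the SQL shown is illustrative):
#   oracle = getT0DbConnection()
#   cur = oracle.cursor()
#   cur.execute("SELECT status FROM tasks WHERE taskname='...'")
#   rows = cur.fetchall()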
193 def getTaskManager():
194  ''' Open task manager (used by all following commands, at the very least through subcommands) '''
195  try:
196  return TaskManager(options.dbconn)
197  except:
198  print ('ERROR: Unable to access task manager database %s' % options.dbconn)
199  sys.exit(1)
200 
201 def fail(message):
202  print()
203  print ('ERROR:', message)
204  sys.exit(1)
205 
206 def dataset_from_run_and_tag(run, tag):
207  ''' Given a run number and tag, check input dataset and work out name. '''
208  fs = DiskUtils.FileSet.from_ds_info(run,
209  project=options.project,
210  stream=options.stream,
211  base=options.eospath)
212  datasets = list(fs
213  .strict_mode()
214  .use_files_from(options.filelist)
215  .matching(options.filter + tag + '.*')
216  .excluding(r'.*\.TMP\.log.*')
217  .only_single_dataset())
218  dataset = os.path.dirname(datasets[0])
219  dsname = '.'.join(os.path.basename(datasets[0]).split('.')[:3])
220  return (dataset, dsname)
221 
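# For illustration (hypothetical file name): an input file under
#   .../data17_13TeV.00358096.calibration_BeamSpot.recon.AOD.f961/<file>
# yields dsname = 'data17_13TeV.00358096.calibration_BeamSpot' (the first
# three dot-separated fields) and dataset = the directory containing it.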
222 def run_jobs(script, ds_name, task_name, params, *args):
223  ''' Invoke runJobs.py '''
224  arg_list = ['runJobs']
225  arg_list.extend(map(str, args))
226  if params:
227  param_args = []
228  for k,v in params.items():
229  param_args.append("{}={}".format(k,repr(v)))
230  arg_list.extend(['--params', ', '.join(param_args)])
231  if options.testonly:
232  arg_list.append('--test')
233  arg_list.extend([script, ds_name, task_name])
234 
235  print (subprocess.list2cmdline(arg_list))
236  subprocess.check_call(arg_list)
237 
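# Sketch of the command line run_jobs() assembles, e.g. for the 'run' command
# below (values are illustrative):
#   runJobs --files-per-job 0 --match '.*\.AOD\..*' --exclude '.*\.TMP\.log.*' \
#       --directory <dataset> --params "LumiRange=5" \
#       InDetBeamSpotExample/VertexTemplate.py <dsname> VTX-LR5.<tag>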
238 #
239 # Upload any SQLite file to COOL (independent of task, w/o book keeping)
240 #
241 if cmd == 'upload' and len(cmdargs) == 1:
242  dbfile = args[1]
243  if not options.beamspottag:
244  fail('No beam spot tag specified')
245  try:
246  with open(prodcoolpasswdfile, 'r') as passwdfile:
247  passwd = passwdfile.read().strip()
248  except:
249  fail('Unable to determine COOL upload password')
250 
251  try:
252  with open(flaskcoolpasswdfile, 'r') as flaskpasswdfile:
253  flaskpasswd = flaskpasswdfile.read().strip()
254  except:
255  fail('Unable to determine FLASK upload password')
256 
257  print()
258  print ('Beam spot file: ', dbfile)
259  print ('Uploading to tag: ', options.beamspottag)
260  os.system('dumpBeamSpot.py -d %s -t %s %s' % (
261  options.srcdbname,
262  options.srctag,
263  dbfile))
264 
265  print()
266  stat = os.system('/afs/cern.ch/user/a/atlcond/utilsproxy/AtlCoolMerge.py --flask --nobackup --client_id cool-flask-beamspot-client --client_secret %s --nomail %s %s --folder /Indet/Beampos --tag %s --retag %s --destdb %s %s %s ATONR_COOLOFL_GPN ATLAS_COOLOFL_INDET_W %s' % (
267  flaskpasswd,
268  '--batch' if options.batch else '',
269  ('--ignoremode %s' % options.ignoremode) if options.ignoremode else '',
270  options.srctag,
271  options.beamspottag,
272  options.destdbname,
273  dbfile,
274  options.srcdbname,
275  passwd))
276 
277  if stat: fail("UPLOADING TO COOL FAILED - PLEASE CHECK CAREFULLY!")
278  sys.exit(0)
279 
280 #
281 # Run beam spot reconstruction jobs
282 #
283 if cmd=='run' and len(args)==3:
284  run = args[1]
285  tag = args[2]
286  dataset, dsname = dataset_from_run_and_tag(run, tag)
287 
288  run_jobs(options.runjoboptions, dsname,
289  '{}-LR5.{}'.format(options.runtaskname, tag),
290  {
291  'LumiRange' : 5,
292  },
293  '--files-per-job', 0,
294  '--match', options.filter,
295  '--exclude', r'.*\.TMP\.log.*',
296  '--directory', dataset)
297  sys.exit(0)
298 
299 
300 #
301 # Run beam spot monitoring job
302 #
303 if cmd=='runMon' and len(args)==3:
304  run = args[1]
305  tag = args[2]
306 
307  if not options.beamspottag:
308  fail('No beam spot tag specified')
309 
310  dataset, dsname = dataset_from_run_and_tag(run, tag)
311 
312  # NOTE: The command below may be executed via a cron job, so we need to set STAGE_SVCCLASS
313  # explicitly in all cases, since it may not be inherited from the environment.
314  if 'ESD' in options.filter:
315  # NOTE: We pass along the filter setting, but currently we can do --lbperjob only for ESD since for
316  # other data sets we have only the merged files.
317  run_jobs(options.monjoboptions, dsname,
318  '{}.{}'.format(options.montaskname, tag),
319  {
320  'cmdjobpreprocessing' : 'export STAGE_SVCCLASS=atlcal; export ATLAS_LOCAL_ROOT_BASE=/cvmfs/atlas.cern.ch/repo/ATLASLocalRootBase',
321  'useBeamSpot' : True,
322  'beamspottag' : options.beamspottag,
323  },
324  '--lbperjob', 10,
325  '--match', options.filter,
326  '--exclude', r'.*\.TMP\.log.*',
327  '--directory', dataset)
328  elif options.filelist is not None:
329  lbinfoinfiles = True
330  for line in open(options.filelist, 'r'):
331  if "lb" not in line:
332  lbinfoinfiles = False
333  break
334  lboptions='--lbperjob=10' if lbinfoinfiles else '--files-per-job=10'
335  run_jobs(options.monjoboptions, dsname, '{}.{}'.format(options.montaskname, tag),
336  {
337  'tracksAlreadyOnBeamLine' : True,
338  'useBeamSpot' : True,
339  'beamspottag' : options.beamspottag,
340  },
341  lboptions,
342  '--match', options.filter,
343  '--exclude', r'.*\.TMP\.log.*',
344  '--directory', dataset,
345  '--queue', '"tomorrow"')
346  else:
347  queue = options.batch_queue or '"tomorrow"'
348  print('Queue: ', queue )
349 
350  params = {
351  'cmdjobpreprocessing' : 'export STAGE_SVCCLASS=atlcal; export ATLAS_LOCAL_ROOT_BASE=/cvmfs/atlas.cern.ch/repo/ATLASLocalRootBase',
352  'useBeamSpot' : True,
353  'beamspottag' : options.beamspottag
354  }
355 
356  # Additional job parameters
357  for s in options.params.split(', '):
358  if s:
359  try:
360  p = s.split('=',1)
361  params[p[0].strip()] = eval(p[1].strip())
362  except:
363  print ('\nERROR parsing user parameter',p,'- parameter will be ignored')
364 
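# For example (hypothetical parameters), --params 'MinVtxProb=0.01, UseFilledBCIDsOnly=False'
# yields params['MinVtxProb'] = 0.01 and params['UseFilledBCIDsOnly'] = False.
# Note the ', ' separator and that values pass through eval(), so string
# values need explicit quotes, e.g. --params "GRLFilter='myGRL.xml'".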
365  run_jobs(options.monjoboptions, dsname, '{}.{}'.format(options.montaskname, tag),params,
366  '--lbperjob', 20,
367  '--match', options.filter,
368  '--exclude', r'.*\.TMP\.log.*',
369  '--directory', dataset,
370  '--queue', queue)
371 
372  sys.exit(0)
373 
374 
375 #
376 # Backup to EOS
377 #
378 if cmd=='backup' and len(args)==2:
379  if options.archive and options.incremental:
380  sys.exit('ERROR: Cannot do incremental archiving')
381  tmplist = glob.glob(args[1]+'/')
382  dirlist = []
383  for dslash in tmplist:
384  d = dslash[:-1] # Remove trailing /
385  if options.incremental:
386  baklog = d+'/backup.log'
387  if os.path.exists(baklog):
388  cmd = 'find %s -maxdepth 1 -newer %s' % (d,baklog)
389  (status,output) = subprocess.getstatusoutput(cmd)
390  # Python 3's subprocess.getstatusoutput already returns the exit code (no shift needed)
391  if status:
392  sys.exit("ERROR: Error executing %s" % cmd)
393  if output:
394  dirlist.append(d)
395  else:
396  dirlist.append(d)
397  pass
398  else:
399  dirlist.append(d)
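# The incremental mode above relies on 'find DIR -maxdepth 1 -newer DIR/backup.log':
# it prints entries modified after the last backup, so any non-empty output
# flags the directory for backup again.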
400  print ('\nFound %i directories for backup:\n' % len(dirlist))
401  for d in dirlist:
402  print (' %s' % d)
403  if options.archive:
404  print ('\n****************************************************************')
405  print ('WARNING: ARCHIVING - DIRECTORIES WILL BE DELETED AFTER BACKUP!!!')
406  print ('****************************************************************')
407  if not options.batch:
408  a = input('\nARE YOU SURE [n] ? ')
409  if a!='y':
410  sys.exit('ERROR: Aborted by user')
411  for d in dirlist:
412  tmpdir = '/tmp'
413  if options.archive:
414  outname = d.replace('/','-')+time.strftime('-%G_%m_%d.tar.gz')
415  path = archivepath
416  else:
417  outname = d.replace('/','-')+'.tar.gz'
418  path = backuppath
419 
420  print ('\nBacking up %s --> %s/%s ...\n' % (d,path,outname))
421 
422  status = os.system('tar czf %s/%s %s' % (tmpdir,outname,d)) >> 8
423  if status:
424  sys.exit('\nERROR: Unable to create local tar file %s/%s' % (tmpdir,outname))
425 
426  status = os.system('xrdcp -f %s/%s root://eosatlas.cern.ch/%s/%s' % (tmpdir,outname,path,outname)) >> 8
427 
428  if status:
429  os.system('rm -rf %s/%s' % (path,outname) )
430  status = os.system('xrdcp -f %s/%s root://eosatlas.cern.ch/%s/%s' % (tmpdir,outname,path,outname)) >> 8
431  if status:
432  # Continue to try other files if one failed to upload to EOS
433  print ('\nERROR: Unable to copy file to EOS (%s/%s)' % (path,outname))
434  continue
435 
436  os.system('rm %s/%s' % (tmpdir,outname))
437  status = os.system('echo "`date` %s/%s" >> %s/backup.log' % (path,outname,d)) >> 8
438  if status:
439  sys.exit('\nERROR: Could not update backup log file')
440 
441  if options.archive:
442  print ('\nWARNING: DELETION OF SOURCE FILES NOT YET IMPLEMENTED\n')
443  sys.exit(0)
444 
445 
446 #
447 # List backup directory on EOS
448 #
449 if cmd == 'lsbackup' and not cmdargs:
450  path = archivepath if options.archive else backuppath
451  print ('Backup directory:', path)
452  for f in DiskUtils.from_directory(path):
453  print (f)
454  sys.exit(0)
455 
456 
457 #
458 # Show available data sets
459 #
460 if cmd == 'show' and len(cmdargs)==1:
461  run = cmdargs[0]
462 
463  print ('Base path: ', options.eospath)
464  print ('Project tag: ', options.project)
465  print ('Stream: ', options.stream)
466  print()
467  print ('Files available (filtered by: {}):'.format(options.filter))
468  print ('---------------')
469 
470  fs = DiskUtils.FileSet.from_ds_info(run,
471  project=options.project,
472  stream=options.stream,
473  base=options.eospath)
474  for f in fs.matching(options.filter):
475  print (f)
476 
477  print()
478  print ('Beam spot tasks:')
479  print ('---------------')
480  with getTaskManager() as taskman:
481  for t in taskman.taskIterDict(qual=["where DSNAME like '%%%s%%' order by UPDATED" % run]):
482  print ('%-45s %-45s %4s job(s), last update %s' % (t['DSNAME'],t['TASKNAME'],t['NJOBS'],time.ctime(t['UPDATED'])))
483  sys.exit(0)
484 
485 
486 #
487 # Postprocessing: for all tasks requiring it, or for a selected set of tasks
488 #
489 if cmd == 'postproc' and not cmdargs:
490  with getTaskManager() as taskman:
491  for t in taskman.taskIterDict(
492  qual=['where STATUS > %i and STATUS <= %i order by UPDATED' % (
493  TaskManager.StatusCodes['RUNNING'],
494  TaskManager.StatusCodes['POSTPROCESSING'])]):
495  doPostProcessing(taskman, t, t['TASKPOSTPROCSTEPS'].split(), BeamSpotPostProcessing)
496  sys.exit(0)
497 
498 if cmd == 'postproc' and len(cmdargs) in [2,3]:
499  dsname = cmdargs[0]
500  taskname = cmdargs[1]
501 
502  # for backwards compatibility these are equivalent:
503  # $0 postproc DSNAME TASKNAME POSTPROCSTEPS
504  # $0 -z POSTPROCSTEPS postproc DSNAME TASKNAME
505  steps = options.postprocsteps if len(cmdargs) < 3 else cmdargs[2].split(',')
506  if steps:
507  print ('Executing postprocessing tasks:', steps)
508  else:
509  print ('Executing postprocessing tasks as specified in task database')
510  print()
511 
512  with getTaskManager() as taskman:
513  try:
514  taskList = getFullTaskNames(taskman,
515  dsname,
516  taskname,
517  confirmWithUser=not options.batch,
518  addWildCards=not options.nowildcards)
519  except TaskManagerCheckError as e:
520  fail(e)
521  for taskName in taskList:
522  t = taskman.getTaskDict(taskName[0], taskName[1])
523  if steps:
524  doPostProcessing(taskman, t, steps, BeamSpotPostProcessing, forceRun=True)
525  else:
526  doPostProcessing(taskman, t, t['TASKPOSTPROCSTEPS'].split(), BeamSpotPostProcessing, forceRun=True)
527  sys.exit(0)
528 
529 
530 #
531 # Upload beam spot data for given task to COOL
532 #
533 if cmd=='upload' and len(args)==3:
534  with getTaskManager() as taskman:
535  try:
536  [(dsname,task)] = getFullTaskNames(taskman,args[1],args[2],requireSingleTask=True,confirmWithUser=not options.batch,addWildCards=not options.nowildcards)
537  except TaskManagerCheckError as e:
538  print (e)
539  sys.exit(1)
540  if not options.beamspottag:
541  sys.exit('ERROR: No beam spot tag specified')
542 
543  dbfile = glob.glob('%s/%s/*-beamspot.db' % (dsname,task))
544  if len(dbfile)!=1:
545  print ('ERROR: Missing or ambiguous input COOL file:',dbfile)
546  sys.exit(1)
547 
548  try:
549  with open(prodcoolpasswdfile, 'r') as passwdfile:
550  passwd = passwdfile.read().strip()
551  except:
552  sys.exit('ERROR: Unable to determine COOL upload password')
553 
554  try:
555  with open(flaskcoolpasswdfile, 'r') as flaskpasswdfile:
556  flaskpasswd = flaskpasswdfile.read().strip()
557  except:
558  fail('Unable to determine FLASK upload password')
559 
560  print ('\nData set: ',dsname)
561  print ('Beam spot file: ',dbfile[0])
562  print ('Uploading to tag: ',options.beamspottag)
563  os.system('dumpBeamSpot.py -d %s -t %s %s' % (options.srcdbname,options.srctag,dbfile[0]))
564 
565  if options.ignoremode:
566  ignoremode = '--passopt="--appendlocked --ignoremode %s"' % options.ignoremode
567  else:
568  ignoremode = ''
569  if options.batch:
570  batchmode = '--batch'
571  else:
572  batchmode = ''
573 
574  print('command: /afs/cern.ch/user/a/atlcond/utilsproxy/AtlCoolMerge.py --flask --nobackup --client_id cool-flask-beamspot-client --client_secret <flaskpassword> --nomail %s %s --folder /Indet/Beampos --tag %s --retag %s --destdb %s %s %s ATONR_COOLOFL_GPN ATLAS_COOLOFL_INDET_W <password>' % (batchmode,ignoremode,options.srctag,options.beamspottag,options.destdbname,dbfile[0],options.srcdbname))
575  stat = os.system('/afs/cern.ch/user/a/atlcond/utilsproxy/AtlCoolMerge.py --flask --nobackup --client_id cool-flask-beamspot-client --client_secret %s --nomail %s %s --folder /Indet/Beampos --tag %s --retag %s --destdb %s %s %s ATONR_COOLOFL_GPN ATLAS_COOLOFL_INDET_W %s' % (flaskpasswd,batchmode,ignoremode,options.srctag,options.beamspottag,options.destdbname,dbfile[0],options.srcdbname,passwd))
576 
577  if stat:
578  print ("\n\nERROR: UPLOADING TO COOL FAILED - PLEASE CHECK CAREFULLY!\n\n")
579  sys.exit(1)
580 
581  # Record upload information, both in task DB (field COOLTAGS) and in upload flag file
582  uploadflag = dbfile[0]+'.uploaded'
583  stat = os.system('echo "`date` %s" >> %s' % (options.beamspottag,uploadflag) )
584  if stat:
585  print ("ERROR: Uploading was successful, but unable to set upload flag", uploadflag )
586 
587  cooltags = options.beamspottag
588 
589  # If uploading to the beam spot tag linked to the Current BLK alias, and the Next BLK alias also exists (not '')
590  # and points to a different tag, then AtlCoolMerge will upload to the Next beam spot tag too. Hence record
591  # that in COOLTAGS too. Mimics the logic in AtlCoolMergeLib.py.
592 
593  nextbeamspot = ''
594  try:
595  nextbeamspot = COOLUtils.resolveNextBeamSpotFolder()
596  except:
597  nextbeamspot = ''
598  if nextbeamspot == options.beamspottag:
599  nextbeamspot = ''
600  if not ('UPD' in options.beamspottag and 'UPD' in nextbeamspot):
601  nextbeamspot = ''
602  if nextbeamspot != '':
603  print ('Additionally uploading to Next tag: ',nextbeamspot)
604  cooltags = appendUnique(cooltags, nextbeamspot)
605 
606  # Upload tag(s) to TaskManager
607  t = taskman.getTaskDict(dsname,task)
608  taskman.setValue(dsname,task,'COOLTAGS',appendUnique(t['COOLTAGS'],cooltags))
609 
610  sys.exit(0)
611 
612 
613 #
614 # Retrieve task data from grid job
615 #
616 if cmd=='dq2get' and len(args)==3:
617  try:
618  with getTaskManager() as taskman:
619  [(dsname,task)] = getFullTaskNames(taskman,args[1],args[2],requireSingleTask=True,addWildCards=not options.nowildcards)
620  except TaskManagerCheckError as e:
621  print (e)
622  sys.exit(1)
623  dir = os.path.join(dsname, task)
624  griddsname = '%s.%s-%s' % (options.griduser,dsname,task)
625  path = os.path.join(dir, griddsname)
626  if os.path.exists(path):
627  print ('ERROR: Path exists already:',path)
628  sys.exit(1)
629  print ('Extracting into %s/%s ...' % (dir,griddsname))
630  stat = os.system('cd %s; dq2-get -T 6,6 %s' % (dir,griddsname))
631  if stat:
632  print ("ERROR: Problem occurred while extracting data set - task status not changed")
633 
634  statfile = glob.glob('%s/000/*.status.SUBMITTED' % (dir))
635  if len(statfile)!=1:
636  print ("ERROR: Unable to uniquely identfying status - giving up. Candidate status files are:")
637  print (statfile)
638  sys.exit(1)
639  os.system('rm %s' % statfile[0])
640  basename = statfile[0][:-17]
641  os.system('touch %s.exit.0' % basename)
642  os.system('touch %s.COMPLETED' % basename)
643  print ("\nSuccessfully downloaded data set",griddsname)
644  os.system('du -hs %s' % path)
645  print()
646  sys.exit(0)
647 
648 
649 #
650 # Query T0 task status
651 #
652 if cmd=='queryT0' and len(args)==3:
653  try:
654  with getTaskManager() as taskman:
655  [(dsname,task)] = getFullTaskNames(taskman,args[1],args[2],requireSingleTask=True,addWildCards=not options.nowildcards)
656  except TaskManagerCheckError as e:
657  print (e)
658  sys.exit(1)
659  tags = task.split('.')[-1]
660  # FIXME: change the following to automatically determine from the input files of the DB_BEAMSPOT job?
661  if 'ESD' in options.filter:
662  t0TaskName = '%s.recon.ESD.%s.beamspotproc.task' % (dsname,tags)
663  else:
664  if any(t[0] == 'm' for t in tags.split('_')):
665  t0TaskName = '%s.merge.AOD.%s.beamspotproc.task' % (dsname,tags)
666  else:
667  t0TaskName = '%s.recon.AOD.%s.beamspotproc.task' % (dsname,tags)
668 
669  print ('Querying Tier-0 database for task',t0TaskName,'...')
670  oracle = getT0DbConnection()
671  cur = oracle.cursor()
672  try:
673  #sql = str("SELECT status FROM etask WHERE taskname='%s' AND creationtime>sysdate-10" % t0TaskName) # Oracle doesn't want unicode
674  sql = str("SELECT status FROM tasks WHERE taskname='%s' AND tasktype='beamspotproc'" % t0TaskName) # Oracle doesn't want unicode
675  cur.execute(sql)
676  r = cur.fetchall()
677  except Exception as e:
678  print (e)
679  sys.exit('ERROR: Unable to retrieve status of task %s' % t0TaskName)
680  if len(r)==0:
681  sys.exit('ERROR: No such task found: %s' % t0TaskName)
682  if len(r)>1:
683  sys.exit('ERROR: %i tasks found - unable to determine task status' % len(r))
684  status = r[0][0]
685  print ('\nTask status = %s\n' % status)
686  if status=='FINISHED' or status=='TRUNCATED':
687  sys.exit(0)
688  else:
689  sys.exit(2)
690 
691 
692 #
693 # Run command over set of matching tasks
694 #
695 if cmd=='runCmd' and len(args)==4:
696  dssel = args[1]
697  tasksel = args[2]
698  cmd = args[3]
699  if options.nowildcards:
700  qual = ["where DSNAME like '%s' and TASKNAME like '%s'" % (dssel,tasksel)]
701  else:
702  qual = ["where DSNAME like '%%%s%%' and TASKNAME like '%%%s%%'" % (dssel,tasksel)]
703  if options.beamspottag:
704  qual.append("and COOLTAGS like '%%%s%%'" % options.beamspottag)
705  #qual.append("order by RUNNR desc")
706  qual.append("order by RUNNR")
707  print ('Running command\n')
708  print (' ',cmd % ({'DSNAME': 'DSNAME', 'TASKNAME': 'TASKNAME', 'RUNNR': 'RUNNR', 'FULLDSNAME': 'FULLDSNAME'}))
709  print ('\nover the following datasets / tasks:\n')
710  print (' %-10s %-40s %s' % ('Run','Dataset','Task'))
711  print (' %s' % (74*'-'))
712 
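# The command string is expanded once per matching task with Python
# %-formatting against the task dictionary; e.g. (hypothetical):
#   beamspotman.py runCmd 'data17_%' 'VTX%' 'echo %(DSNAME)s %(TASKNAME)s'
# runs 'echo <dataset> <task>' for every selected task.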
713  taskList = []
714  with getTaskManager() as taskman:
715  for t in taskman.taskIterDict('*',qual):
716  dsname = t['DSNAME']
717 
718  # Deal with express vs minBias in 900 GeV runs
719  if options.nominbias and 'physics_MinBias' in dsname:
720  print ('Warning: changing physics_MinBias in dsname to express_express')
721  dsname = dsname.replace('physics_MinBias','express_express')
722  t['DSNAME'] = dsname
723 
724  runnr = t['RUNNR']
725  taskname = t['TASKNAME']
726  print (' %-10s %-40s %s'% (runnr,dsname,taskname))
727 
728  if options.excludeiftask:
729  if options.nowildcards:
730  # n = taskman.getNTasks(['where DSNAME =',DbParam(dsname),'and TASKNAME like',DbParam(options.excludeiftask)])
731  n = taskman.getNTasks(["where DSNAME ='%s' and TASKNAME like '%s'" % (dsname,options.excludeiftask)])
732  else:
733  n = taskman.getNTasks(["where DSNAME ='%s' and TASKNAME like '%%%s%%'" % (dsname,options.excludeiftask)])
734  if n!=0:
735  print (' ==> SKIPPING - %i matching tasks found' % n)
736  continue
737 
738  if options.excludeds:
739  excludeList = options.excludeds.split(',')
740  # 'continue' inside an inner loop would not skip this dataset, so test with any()
741  if any(s in dsname for s in excludeList):
742  print (' ==> SKIPPING dataset')
743  continue
744 
745  if options.dslist:
746  stat, out = subprocess.getstatusoutput('grep -c %s %s' % (dsname, options.dslist))
747  if stat==2: # Missing file etc
748  print (out)
749  sys.exit(1)
750 
751  try:
752  if int(out)==0:
753  print (' ==> SKIPPING - dataset %s not found in %s' % (dsname, options.dslist))
754  continue
755  except ValueError: # Some other error
756  print (out)
757  sys.exit(1)
758 
759  del stat, out
760  # Find full dataset name from file
761  stat, out = subprocess.getstatusoutput('grep %s %s' % (dsname, options.dslist))
762  if stat==2: # Missing file etc
763  print (out)
764  sys.exit(1)
765 
766  t['FULLDSNAME'] = out.strip()
767 
768  taskList.append(t)
769 
770  if not taskList:
771  print ('\nNo jobs need to be run.\n')
772  sys.exit(0)
773 
774  print ('\n%i datasets / tasks selected.\n' % len(taskList))
775  if not options.batch:
776  a = input('\nARE YOU SURE [n] ? ')
777  if a!='y':
778  sys.exit('ERROR: Running of monitoring tasks aborted by user')
779  print()
780  for t in taskList:
781  fullcmd = cmd % t
782  print ('\nExecuting: ',fullcmd,' ...')
783  sys.stdout.flush()
784  os.system(fullcmd)
785 
786  print()
787  sys.exit(0)
788 
789 
790 #
791 # Run monitoring jobs for jobs completed at most MAXTIME ago
792 #
793 if cmd=='runMonJobs' and len(args)<3:
794  if len(args)==2:
795  earliestUpdateTime = time.time()-float(args[1])
796  else:
797  earliestUpdateTime = 0
798  if not options.beamspottag:
799  sys.exit('ERROR: No beam spot tag specified')
800 
801  print ('Running the following monitoring tasks for tasks of type %s:\n' % options.runtaskname)
802  print (' %-10s %s' % ('RUN','MONITORING TASK'))
803  print (' %s' % (60*'-'))
804 
805  onDiskCode = TaskManager.OnDiskCodes['ALLONDISK']
806  taskList = []
807  with getTaskManager() as taskman:
808  for t in taskman.taskIterDict('*',["where TASKNAME like '%s%%'" % options.runtaskname,
809  "and UPDATED >= ", DbParam(earliestUpdateTime),
810  "and ONDISK = ", DbParam(onDiskCode),
811  "and COOLTAGS like '%%%s%%'" % options.beamspottag,
812  "order by RUNNR desc"]):
813  dsname = t['DSNAME']
814  runnr = t['RUNNR']
815  taskName = t['TASKNAME']
816  datatag = taskName.split('.')[-1].split('_')[0]
817  monTaskName = 'MON.%s.%s' % (taskName,datatag)
818  useRun = True
819  if options.useRun is not None:
820  useRun = False
821  if runnr == options.useRun:
822  useRun = True
823 
824  if useRun:
825  try:
826  m = next(taskman.taskIterDict('*',["where RUNNR =",DbParam(runnr),"and DSNAME =",DbParam(dsname),"and TASKNAME =",DbParam(monTaskName),"order by UPDATED desc"]))
827  print (' %-10s %s %s'% (runnr,dsname,monTaskName))
828  except:
829  print (' * %-10s %s %s'% (runnr,dsname,'--- no monitoring task found ---'))
830  taskList.append(t)
831  pass
832 
833  if not taskList:
834  print ('\nNo jobs need to be run.\n')
835  sys.exit(0)
836 
837  if not options.batch:
838  a = input('\nARE YOU SURE [n] ? ')
839  if a!='y':
840  sys.exit('ERROR: Running of monitoring tasks aborted by user')
841  print()
842 
843  oracle = getT0DbConnection()
844  for t in taskList:
845  dsname = t['DSNAME']
846  runnr = t['RUNNR']
847  ptag = dsname.split('.')[0]
848  stream = dsname.split('.')[2]
849  taskName = t['TASKNAME']
850  fulldatatag = taskName.split('.')[-1]
851  datatag = fulldatatag.split('_')[0]
852  monTaskName = 'MON.%s' % (taskName)
853 
854  # Now that beamspot folder tag can change, the tag to be monitored must be the one the parent task uploaded to.
855  # If more than one tag (might be case if upload to current/next) then take first (should be same)
856  cooltags = t['COOLTAGS']
857  if not cooltags: cooltags = options.beamspottag # fall back to the monitored tag
858  bstag = cooltags.split()[0]
859 
860  filter = 'AOD'
861  if any(t[0] == 'm' for t in fulldatatag.split('_')):
862  t0dsname = '%s.merge.AOD.%s%%' % (dsname, datatag)
863  else:
864  t0dsname = '%s.recon.AOD.%s%%' % (dsname, datatag)
865 
866  c = getJobConfig('.',dsname,taskName)
867  if 'ESD' in c['inputfiles'][0]:
868  filter = 'ESD'
869  t0dsname = '%s.recon.ESD.%s' % (dsname, datatag)
870 
871  print ('\nRunning monitoring job for run %s:' % runnr)
872 
873  submitjob=True
874  eospath=options.eospath
875  # for old runs we would need to check if the dataset had been replicated at Tier-0.
876  # with EOS this is no longer necessary.
877  r=[]
878  if int(runnr)<240000:
879  print ('... Querying T0 database for replication of %s' % t0dsname)
880  cur = oracle.cursor()
881  cur.execute("select DATASETNAME,PSTATES from DATASET where DATASETNAME like '%s' and PSTATES like '%%replicate:done%%'" % t0dsname)
882  r = cur.fetchall()
883  if not r:
884  print (' WARNING: input data not yet replicated - please retry later')
885  submitjob=False
886  else:
887  print ('... Querying T0 database for completion of merging jobs of %s' % t0dsname)
888  cur = oracle.cursor()
889  origt0TaskName='%s.recon.AOD.%s%%.aodmerge.task' % (dsname,datatag)
890  cur.execute("select status from tasks where taskname like '%s' and tasktype='aodmerge'" % origt0TaskName)
891  r = cur.fetchall()
892  if not r:
893  print (' WARNING: can\'t get status of merge job for %s, running on un-merged samples instead' % origt0TaskName)
894  eospath='/eos/atlas/atlastier0/tzero/prod'
895  elif not (r[0][0]=='FINISHED' or r[0][0]=='TRUNCATED'):
896  print (' Merge job for taskname %s is not finished yet, has status %s, running on un-merged samples instead.' % (origt0TaskName, r[0][0]))
897  eospath='/eos/atlas/atlastier0/tzero/prod'
898  else:
899  print (' Merge job is finished, launching jobs.')
900  submitjob=True
901 
902  if submitjob:
903  if int(runnr)<240000:
904  print (' ',r)
905  print ('... Submitting monitoring task')
906  queue = options.batch_queue or '\'\"tomorrow\"\''
907  paramValues = ''
908  if options.params:
909  paramValues = '--params \''+options.params+'\''
910  testFlag = ''
911  if options.testonly:
912  testFlag = '--test'
913 
914  cmd = 'beamspotman --eospath=%s -p %s -s %s -f \'.*\\.%s\\..*\' -t %s --queue %s %s %s --montaskname %s runMon %i %s' % (eospath,ptag,stream,filter,bstag,queue,paramValues,testFlag,monTaskName,int(runnr),datatag)
915  print (cmd)
916  sys.stdout.flush()
917  status = os.system(cmd) >> 8 # Convert to standard Unix exit code
918  if status:
919  print ('\nERROR: Job submission returned error - exit code %i\n' % status)
920 
921  print()
922  sys.exit(0)
923 
924 
925 #
926 # Archive task to EOS
927 #
928 if cmd=='archive' and len(args)==3:
929  if not options.batch:
930  print ('\nWARNING: If you confirm below, each of the following datasets will:')
931  print (' - be archived to EOS')
932  if options.resultsondisk:
933  print (' - will be marked as RESULTSONDISK in the task database')
934  print (' - all except the results files *** WILL BE DELETED ***')
935  else:
936  print (' - will be marked as ARCHIVED in the task database')
937  print (' - all its files *** WILL BE DELETED ***')
938  print()
939 
940  with getTaskManager() as taskman:
941  try:
942  taskList = getFullTaskNames(taskman,args[1],args[2],confirmWithUser=not options.batch,addWildCards=not options.nowildcards)
943  except TaskManagerCheckError as e:
944  print (e)
945  sys.exit(1)
946 
947  tmpdir = '/tmp'
948  path = archivepath
949  onDiskCode = TaskManager.OnDiskCodes.get('ALLONDISK',None)
950  archivedCode = TaskManager.OnDiskCodes.get('RESULTSONDISK',None) if options.resultsondisk else TaskManager.OnDiskCodes.get('ARCHIVED',None)
951  exceptList = ['*dqflags.txt', '*.gif', '*.pdf', '*.config.py*', '*.argdict.gpickle', '*.AveBeamSpot.log', '*.PlotBeamSpotCompareReproc.log', '*.sh', '*.BeamSpotNt*', '*.BeamSpotGlobalNt.log', '*.status.*', '*.exit.*']
952 
953  for (dsname,taskname) in taskList:
954  t = taskman.getTaskDict(dsname,taskname)
955 
956  # Check that task files are still on disk, so we're not overwriting an earlier archive
957  if t['ONDISK'] != onDiskCode:
958  print ('Skipping task %s / %s status %s (task files must be on disk)' % (dsname,taskname,getKey(TaskManager.OnDiskCodes,t['ONDISK'])))
959  print()
960  continue
961 
962  dir = '%s/%s' % (dsname,taskname)
963  outname = dir.replace('/','-')+time.strftime('-%G_%m_%d.tar.gz')
964  print ('Archiving task %s / %s ...' % (dsname,taskname))
965  print (' --> %s/%s ...' % (path,outname))
966 
967  # Paranoia check against later catastrophic delete
968  if dir=='.' or dir=='*':
969  print ('\n**** FATAL ERROR: Very dangerous value of task directory found: %s - ABORTING' % dir)
970  sys.exit(1)
971 
972  # If expected directory exists, tar up files, write to EOS, mark as archived, and delete
973  if os.path.exists(dir):
974  status = os.system('tar czf %s/%s %s' % (tmpdir,outname,dir)) >> 8
975  if status:
976  sys.exit('\n**** ERROR: Unable to create local tar file %s/%s' % (tmpdir,outname))
977  status = os.system('xrdcp %s/%s root://eosatlas.cern.ch/%s/%s' % (tmpdir,outname,path,outname)) >> 8
978  if status:
979  os.system('rm -rf %s/%s' % (path,outname) )
980  status = os.system('xrdcp -f %s/%s root://eosatlas.cern.ch/%s/%s' % (tmpdir,outname,path,outname)) >> 8
981  if status:
982  sys.exit('\n**** ERROR: Unable to copy file to EOS (%s/%s)' % (path,outname))
983 
984  os.system('rm %s/%s' % (tmpdir,outname))
985  n = taskman.setValue(dsname,taskname,'ONDISK',archivedCode)
986  if n!=1:
987  sys.exit('\n**** ERROR: Unexpected number of tasks modified: %i instead of 1 (DSNAME=%s,TASKNAME=%s)' % (n,dsname,taskname))
988 
989  if options.resultsondisk:
990  oscmd = "find %s ! \‍( -name '%s' \‍) -type f -exec rm {} \;" % (dir, "' -or -name '".join(exceptList))
991  os.system(oscmd)
992  else:
993  os.system('rm -rf %s' % dir)
994  else:
995  print ('\n**** ERROR: No task directory',dir,'\n')
996 
997  print()
998  sys.exit(0)
999 
1000 #
1001 # Run beam spot resubmit failed jobs
1002 # Double check directory structure is appropriate for you
1003 #
1004 
1005 if cmd=='resubmit' and len(args) in [3,4]:
1006 
1007  dsname = args[1]
1008  taskname = args[2]
1009 
1010  # for backwards compatibility these are equivalent:
1011  # $0 resubmit DSNAME TASKNAME QUEUE
1012  # $0 -q QUEUE resubmit DSNAME TASKNAME
1013  queue = args[3] if len(args) == 4 else options.batch_queue
1014  if not queue:
1015  print ('ERROR: No queue was specified (use -q)')
1016  sys.exit(1)
1017 
1018  basepath = os.path.join(os.getcwd(), dsname, taskname)
1019  dircontents = os.listdir(basepath)
1020 
1021  condorScriptTemplate="""executable = %(scriptfile)s
1022 arguments = $(ClusterID) $(ProcId)
1023 output = %(logfile)s.out
1024 error = %(logfile)s.err
1025 log = %(logfile)s.log
1026 universe = vanilla
1027 +JobFlavour = %(batchqueue)s
1028 queue
1029 """
1030 
1031 
1032  for dir in dircontents:
1033  if not os.path.isdir(os.path.join(basepath, dir)):
1034  continue
1035  print (dir)
1036  jobname = dir
1037  if (options.montaskname in taskname.split('.')) or options.legacy_mon:
1038  jobname = '-'.join([dsname, taskname, 'lb' + dir])
1039  fullpath = os.path.join(basepath, dir)
1040 
1041  isRunning = False
1042  isFailed = False
1043  for f in os.listdir(fullpath):
1044  if re.search('RUNNING', f):
1045  isRunning = True
1046  if re.search('COMPLETED',f) or re.search('POSTPROCESSING',f):
1047  with open(os.path.join(fullpath, jobname + '.exitstatus.dat')) as statusFile:
1048  status = statusFile.read(1)
1049  print (status)
1050  if status != "0":
1051  isFailed = True
1052  isRunning = False
1053  if options.resubRunning and isRunning:
1054  print ("Will resubmit running job")
1055  elif (isRunning or not isFailed) and not options.resubAll:
1056  continue
1057 
1058 
1059  for f in os.listdir(fullpath):
1060  if re.search('.exitstatus.', f):
1061  os.remove(os.path.join(fullpath, f))
1062  elif re.search('.exit.', f):
1063  os.remove(os.path.join(fullpath, f))
1064  elif re.search('.status.', f):
1065  os.remove(os.path.join(fullpath, f))
1066  elif re.search('.log', f):
1067  os.remove(os.path.join(fullpath, f))
1068  elif re.search('.py.final.py', f):
1069  os.remove(os.path.join(fullpath, f))
1070 
1071  jobConfig = {
1072  'test': 'this',
1073  'batchqueue' : queue,
1074  'jobname' : jobname,
1075  'jobdir' : fullpath,
1076  }
1077  jobConfig['logfile'] = '%(jobdir)s/%(jobname)s.log' % jobConfig
1078  jobConfig['scriptfile'] = '%(jobdir)s/%(jobname)s.sh' % jobConfig
1079 
1080  condorScript = condorScriptTemplate % jobConfig
1081  print (condorScript)
1082  script = open('condorSubmit.sub','w')
1083  script.write(condorScript)
1084  script.close()
1085  os.chmod('condorSubmit.sub',0o755)
1086  #batchCmd = 'bsub -L /bin/bash -q %(batchqueue)s -J %(jobname)s -o %(logfile)s %(scriptfile)s' % jobConfig
1087  batchCmd = 'condor_submit condorSubmit.sub'
1088 
1089  print (batchCmd)
1090  os.system(batchCmd)
1091 
1092  with getTaskManager() as taskman:
1093  print (taskman.getStatus(dsname, taskname))
1094  taskman.setStatus(dsname, taskname, TaskManager.StatusCodes['RUNNING'] )
1095 
1096  sys.exit(0)
1097 
1098 
1099 #
1100 # Run beam spot reprocessing jobs
1101 # Defines one task with several jobs where the splitting into groups of 5 LBs is done by hand
1102 #
1103 if cmd=='reproc' and len(args)==5:
1104  from InDetBeamSpotExample import HTCondorJobRunner
1105 
1106  jobopts = args[1]
1107  dsname = args[2]
1108  taskname = args[3]
1109  inputdata = args[4]
1110 
1111  lbperjob = options.lbperjob
1112  params = {'LumiRange': lbperjob}
1113  cmd = ' '.join(sys.argv)
1114  files = []
1115 
1116  # Additional job parameters
1117  for s in options.params.split(', '):
1118  if s:
1119  try:
1120  p = s.split('=',1)
1121  params[p[0].strip()] = eval(p[1].strip())
1122  except:
1123  print ('\nERROR parsing user parameter',p,'- parameter will be ignored')
1124 
1125  # TODO: Is this still needed?
1126  # if options.rucio:
1127  # # Has extra subdirectories: find unique set of these from filepaths, ignoring final part (i.e. filename).
1128  # # Will also work for single directory but more time consuming.
1129  # #inputdirs = set(['/'+'/'.join(f.split('/')[4:-1])+'/' for f in files])
1130  # inputdirs = set(['/'.join(f.replace(protocol, '').split('/')[:-1])+'/' for f in files])
1131 
1132  # Make list of all files (either from explicit list or by 'globbing' dir)
1133  lbMap = {}
1134  backend = DiskUtils.EOS() if options.eos else None
1135  fs = DiskUtils.FileSet.from_input(inputdata, backend=backend)
1136  fs = fs.matching(options.filter).use_files_from(options.filelist).only_latest(options.removedups)
1137  for f, lbs in fs.with_lumi_blocks(options.lbfilemap):
1138  lbMap[f] = lbs
1139  files.append(f)
1140  if not files: fail('No files were found.')
1141 
1142  # Form bunched jobs
1143  jobFileDict = {}
1144  jobLBDict = {}
1145 
1146  # Special case to submit a single job over all files
1147  if lbperjob == -1:
1148  jobId = 1
1149  jobFileDict[jobId] = files
1150  jobLBDict[jobId] = []
1151 
1152  for f in files:
1153  try:
1154  lbs = sorted(lbMap[f]) # lbMap is keyed by the file name as yielded above
1155  except KeyError:
1156  sys.exit('No mapping for file %s' % f.split('/')[-1])
1157 
1158  jobLBDict[jobId].extend(lbs)
1159  else:
1160  for f in files:
1161  try:
1162  lbs = sorted(lbMap[f]) # lbMap is keyed by the file name as yielded above
1163  except KeyError:
1164  sys.exit('No mapping for file %s' % f.split('/')[-1])
1165 
1166  for lbnr in lbs:
1167  jobId = int((lbnr-1)/lbperjob)
1168 
1169  if not jobId in jobFileDict:
1170  jobFileDict[jobId] = [f]
1171  jobLBDict[jobId] = [lbnr]
1172  else:
1173  if not f in jobFileDict[jobId]:
1174  jobFileDict[jobId].append(f)
1175  jobLBDict[jobId].append(lbnr)
1176 
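# Worked example of the bunching above (lbperjob = 5): LBs 1-5 fall into
# jobId 0, LBs 6-10 into jobId 1, etc., since jobId = int((lbnr-1)/lbperjob);
# the job number used below, i*lbperjob+1, is then the first LB of the bunch.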
1177  # Submit bunched jobs
1178  with getTaskManager() as taskman:
1179  for i in sorted(jobFileDict.keys()):
1180  jobnr = i*lbperjob+1 # use first LB number as job number
1181  files=jobFileDict[i]
1182  lbs = sorted(set(jobLBDict[i]))
1183 
1184  #params['lbList'] = '[' + ','.join([str(l) for l in lbs]) + ']'
1185  intlbs = []
1186  for lbnr in lbs:
1187  intlbs.append(int(lbnr))
1188  params['lbList'] = intlbs
1189  jobname=dsname+'-'+taskname+'-lb%03i' % jobnr
1190 
1191  queue = options.batch_queue or '"tomorrow"'
1192  runner = HTCondorJobRunner.HTCondorJobRunner(
1193  jobnr=jobnr,
1194  jobdir=os.path.join(os.getcwd(), dsname, taskname, jobname),
1195  jobname=jobname,
1196  inputds='',
1197  inputfiles=files,
1198  joboptionpath=jobopts,
1199  filesperjob=len(files),
1200  batchqueue=queue,
1201  addinputtopoolcatalog=True,
1202  taskpostprocsteps='ReprocVertexDefaultProcessing',
1203  #outputfilelist=['dpd.root', 'nt.root', 'monitoring.root', 'beamspot.db'],
1204  autoconfparams='DetDescrVersion',
1205  returnstatuscode=True,
1206  comment=cmd,
1207  **params)
1208 
1209  #runner.showParams()
1210  try:
1211  runner.configure()
1212  except Exception as e:
1213  print ("ERROR: Unable to configure JobRunner job - perhaps same job was already configured / run before?")
1214  print ("DEBUG: Exception =",e)
1215  else:
1216  taskman.addTask(dsname, taskname, jobopts, runner.getParam('release'), runner.getNJobs(), runner.getParam('taskpostprocsteps'), comment=cmd)
1217  runner.run()
1218 
1219  sys.exit(0)
1220 
1221 
1222 #
1223 # Run task over AOD, bunching input files to jobs according to meta-data in input
1224 # AOD files.
1225 #
1226 
1227 if cmd=='runaod' and len(args)==5:
1228 
1229  from InDetBeamSpotExample import HTCondorJobRunner
1230  jobopts = args[1]
1231  dsname = args[2]
1232  taskname = args[3]
1233  inputdata = args[4]
1234 
1235  lbperjob = options.lbperjob
1236  params = {'LumiRange': lbperjob}
1237 
1238  # Always fit a single pLb for scans
1239  if options.pseudoLbFile:
1240  params = {'LumiRange': 1}
1241 
1242  cmd = ' '.join(sys.argv)
1243  files = []
1244 
1245  # Additional job parameters
1246  for s in options.params.split(', '):
1247  if s:
1248  try:
1249  p = s.split('=',1)
1250  params[p[0].strip()] = eval(p[1].strip())
1251  except:
1252  print ('\nERROR parsing user parameter',p,'- parameter will be ignored')
1253 
1254  lbMap = {}
1255  backend = DiskUtils.EOS() if options.eos else None
1256  fs = DiskUtils.FileSet.from_input(inputdata, backend=backend)
1257  print ("****************************************************")
1258  print ("*************** printing files *********************")
1259  print (fs)
1260  fs = fs.matching(options.filter)
1261  for f, lbs in fs.with_lumi_blocks(options.lbfilemap):
1262  print (f, lbs)
1263  lbMap[f] = lbs
1264  files.append(f)
1265  if not files: fail('No files were found.')
1266 
1267  # Form bunched jobs
1268  jobFileDict = {}
1269  jobLBDict = {}
1270  jobParams = {}
1271 
1272  if options.pseudoLbFile:
1273  # Extract start and end times of real LBs
1274  from InDetBeamSpotExample.COOLUtils import COOLQuery
1275  coolQuery = COOLQuery()
1276  from InDetBeamSpotExample.Utils import getRunFromName
1277  lbTimes = coolQuery.getLbTimes( getRunFromName(dsname, None, True) )
1278 
1279  # Loop over pseudo LBs
1280  with open(options.pseudoLbFile) as pLbFile:
1281  for line in pLbFile:
1282  if not line.strip() or line.startswith('#'): continue
1283 
1284  tokens = line.split()
1285  plbnr,tstart,tend = int(tokens[0]),int(tokens[1]),int(tokens[2])
1286  jobId = int(plbnr/lbperjob)
1287 
1288  # if we're passing in a file with acqFlags, and we're checking those flags,
1289  # then skip any points that don't have acqFlag=1.0
1290  if not options.noCheckAcqFlag and len(tokens)>=5 and abs(float(tokens[4])-1.)>0.001:
1291  print ("Point is not stationary -- skipping job %d" % jobId)
1292  continue
1293 
1294  # Find real LBs covering time period of pseudo LB. Assumes pLBs in nsec
1295  rlbs = [lb for (lb,time) in lbTimes.items() if (time[0] - tend/1e9)*(time[1] - tstart/1e9) < 0]
1296 
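# The product test above is the usual interval-overlap check: a real LB
# [t0,t1] overlaps the pseudo LB [tstart,tend] (converted ns -> s) iff
# (t0 - tend/1e9)*(t1 - tstart/1e9) < 0. E.g. (hypothetical times in s)
# [100,160] overlaps [150,170] since (100-170)*(160-150) = -700 < 0.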
1297  # Find files containing those real lbs
1298  filenames = []
1299  for f in files:
1300  try:
1301  lbs = sorted(lbMap[f])
1302  except KeyError:
1303  sys.exit('No mapping for file %s' % f.split('/')[-1])
1304 
1305  if not any(lb in rlbs for lb in lbs): continue
1306 
1307  filenames.append(f)
1308  try:
1309  jobLBDict[jobId].extend([lb for lb in rlbs if not lb in jobLBDict[jobId]])
1310  jobFileDict[jobId].extend([f for f in filenames if not f in jobFileDict[jobId]])
1311  jobParams[jobId]['lbData'].append(line.strip('\n').strip())
1312  except KeyError:
1313  jobLBDict[jobId] = rlbs
1314  jobFileDict[jobId] = filenames
1315  jobParams[jobId] = {'lbData' : [line.strip('\n').strip()]}
1316 
1317  else:
1318  for f in files:
1319  try:
1320  lbs = sorted(lbMap[f])
1321  except KeyError:
1322  print ('WARNING: No mapping for file %s. Skipping' % f.split('/')[-1])
1323  continue
1324  #sys.exit('No mapping for file %s' % f.split('/')[-1])
1325 
1326  for lbnr in lbs:
1327  jobId = int((lbnr-1)/lbperjob)
1328 
1329  if not jobId in jobFileDict:
1330  jobFileDict[jobId] = [f]
1331  jobLBDict[jobId] = [lbnr]
1332  else:
1333  if not f in jobFileDict[jobId]:
1334  jobFileDict[jobId].append(f)
1335  jobLBDict[jobId].append(lbnr)
1336 
1337  # Submit bunched jobs
1338  with getTaskManager() as taskman:
1339  for i in sorted(jobFileDict.keys()):
1340  jobnr = i*lbperjob+1 # use first LB number as job number
1341  files=jobFileDict[i]
1342  lbs = sorted(set(jobLBDict[i]))
1343 
1344  intlbs = []
1345  for lbnr in lbs:
1346  intlbs.append(int(lbnr))
1347  params['lbList'] = intlbs
1348 
1349  # Allow job-by-job parameters
1350  try:
1351  p = jobParams[i]
1352  for k,v in p.items(): params[k] = v
1353  except KeyError:
1354  pass
1355 
1356  jobname=dsname+'-'+taskname+'-lb%03i' % jobnr
1357 
1358  queue = options.batch_queue
1359  if queue is None:
1360  # run on a different queue for VdM scans to avoid clogging up the normal queue
1361  queue='"tomorrow"' if options.pseudoLbFile else '"tomorrow"'
1362  runner = HTCondorJobRunner.HTCondorJobRunner(
1363  jobnr=jobnr,
1364  jobdir=os.path.join(os.getcwd(), dsname, taskname, jobname),
1365  jobname=jobname,
1366  inputds='',
1367  inputfiles=files,
1368  joboptionpath=jobopts,
1369  filesperjob=len(files),
1370  batchqueue=queue,
1371  addinputtopoolcatalog=True,
1372  taskpostprocsteps=' '.join(options.postprocsteps),
1373  autoconfparams='DetDescrVersion',
1374  returnstatuscode=True,
1375  comment=cmd,
1376  **params)
1377  if options.testonly:
1378  runner.showParams()
1379  else:
1380  try:
1381  runner.configure()
1382  except Exception as e:
1383  print ("ERROR: Unable to configure JobRunner job - perhaps same job was already configured / run before?")
1384  print ("DEBUG: Exception =",e)
1385  else:
1386  taskman.addTask(dsname, taskname, jobopts, runner.getParam('release'), runner.getNJobs(), runner.getParam('taskpostprocsteps'), comment=cmd)
1387  runner.run()
1388 
1389  sys.exit(0)
1390 
1391 
1392 
1393 #
1394 # Upload any DQ sqlite file to COOL (independent of task, w/o book keeping)
1395 #
1396 if cmd=='dqflag' and len(args)==2:
1397  dbfile = args[1]
1398  if not options.dqtag:
1399  sys.exit('ERROR: No beamspot DQ tag specified')
1400  try:
1401  with open(proddqcoolpasswdfile, 'r') as passwdfile:
1402  passwd = passwdfile.read().strip()
1403  except:
1404  sys.exit('ERROR: Unable to determine DQ COOL upload password')
1405 
1406  print ('\nBeam spot DQ file: ',dbfile)
1407  print ('Uploading to tag: ',options.dqtag)
1408 
1409  if options.ignoremode:
1410  ignoremode = '--ignoremode %s' % options.ignoremode
1411  else:
1412  ignoremode = ''
1413 
1414  if options.batch:
1415  batchmode = '--batch'
1416  else:
1417  batchmode = ''
1418 
1419  print()
1420  cmd = 'dq_defect_copy_defect_database.py --intag %s --outtag %s "sqlite://;schema=%s;dbname=%s" "oracle://ATLAS_COOLWRITE;schema=ATLAS_COOLOFL_GLOBAL;dbname=%s;"' %(options.srcdqtag, options.dqtag, dbfile, options.srcdqdbname, options.destdqdbname)
1421  print (cmd)
1422 
1423  status = os.system(cmd)  # 'status' avoids shadowing the imported stat module
1424 
1425  # Old DQ flags
1426  #stat = os.system('/afs/cern.ch/user/a/atlcond/utils22/AtlCoolMerge.py --nomail %s %s --folder /GLOBAL/DETSTATUS/SHIFTOFL --tag %s --retag %s --destdb %s %s %s ATLAS_COOLWRITE ATLAS_COOLOFL_GLOBAL_W %s' % (batchmode,ignoremode,options.srcdqtag,options.dqtag,options.destdqdbname,dbfile,options.srcdqdbname,passwd))
1427 
1428  if status:
1429  print ("\n\nERROR: UPLOADING DQ FLAG TO COOL FAILED - PLEASE CHECK CAREFULLY!\n\n")
1430  sys.exit(1)
1431  sys.exit(0)
1432 
1433 #
1434 # Upload beam spot DQ flag for given task to COOL
1435 #
1436 if cmd=='dqflag' and len(args)==3:
1437  try:
1438  with getTaskManager() as taskman:
1439  [(dsname,task)] = getFullTaskNames(taskman,args[1],args[2],requireSingleTask=True,confirmWithUser=not options.batch,addWildCards=not options.nowildcards)
1440  except TaskManagerCheckError as e:
1441  print (e)
1442  sys.exit(1)
1443 
1444  if not options.dqtag:
1445  sys.exit('ERROR: No beam spot DQ tag specified')
1446 
1447  dbfile = glob.glob('%s/%s/*-dqflags.db' % (dsname,task))
1448  if len(dbfile)!=1:
1449  print ('ERROR: Missing or ambiguous input COOL DQ file:',dbfile)
1450  sys.exit(1)
1451 
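# --- Annotation: exactly one '*-dqflags.db' sqlite file is expected under
# --- <dsname>/<task>; zero or multiple glob matches abort the upload with a
# --- nonzero exit code.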
1452  try:
1453  with open(proddqcoolpasswdfile, 'r') as passwdfile:
1454  passwd = passwdfile.read().strip()
1455  except IOError:
1456  sys.exit('ERROR: Unable to determine DQ COOL upload password')
1457 
1458  print ('\nData set: ',dsname)
1459  print ('Beam spot DQ file: ',dbfile[0])
1460  print ('Uploading to tag: ',options.dqtag)
1461 
1462  if options.ignoremode:
1463  ignoremode = '--ignoremode %s' % options.ignoremode
1464  else:
1465  ignoremode = ''
1466  if options.batch:
1467  batchmode = '--batch'
1468  else:
1469  batchmode = ''
1470 
1471  cmd = 'dq_defect_copy_defect_database.py --intag %s --outtag %s "sqlite://;schema=%s;dbname=%s" "oracle://ATLAS_COOLWRITE;schema=ATLAS_COOLOFL_GLOBAL;dbname=%s;"' %(options.srcdqtag, options.dqtag, dbfile[0], options.srcdqdbname, options.destdqdbname)
1472  print ("Running %s" % cmd)
1473  status = os.system(cmd)  # 'status' avoids shadowing the imported stat module
1474 
1475  # Old DQ flags
1476  #stat = os.system('/afs/cern.ch/user/a/atlcond/utils22/AtlCoolMerge.py --nomail %s %s --folder /GLOBAL/DETSTATUS/SHIFTOFL --tag %s --retag %s --destdb %s %s %s ATLAS_COOLWRITE ATLAS_COOLOFL_GLOBAL_W %s' % (batchmode,ignoremode,options.srcdqtag,options.dqtag,options.destdqdbname,dbfile[0],options.srcdqdbname,passwd))
1477  if status:
1478  print ("\n\nERROR: UPLOADING DQ FLAG TO COOL FAILED - PLEASE CHECK CAREFULLY!\n\n")
1479  sys.exit(1)
1480 
1481  # Record upload information in upload flag file
1482  uploadflag = dbfile[0]+'.uploaded'
1483  status = os.system('echo "`date` %s" >> %s' % (options.dqtag,uploadflag))
1484  if status:
1485  print ("ERROR: Uploading DQ flag was successful, but unable to set upload flag", uploadflag)
1486 
1487  sys.exit(0)
1488 
1489 #
1490 # Run beam spot BCID job
1491 #
1492 if cmd=='runBCID' and len(args)==3:
1493  run = args[1]
1494  tag = args[2]
1495 
1496  dataset, dsname = dataset_from_run_and_tag(run, tag)
1497 
1498  # NOTE: The command below may be executed via a cron job, so we need to set STAGE_SVCCLASS
1499  # explicitly in all cases, since it may not be inherited from the environment.
1500  # NOTE: We pass along the filter setting, but currently we can do --lbperjob only for ESD since for
1501  # other data sets we have only the merged files.
1502  run_jobs(options.bcidjoboptions, dsname, options.bcidtaskname,
1503  {
1504  'cmdjobpreprocessing' : 'export STAGE_SVCCLASS=atlcal; export ATLAS_LOCAL_ROOT_BASE=/cvmfs/atlas.cern.ch/repo/ATLASLocalRootBase',
1505  'SeparateByBCID' : True,
1506  'VertexNtuple' : False,
1507  },
1508  '--files-per-job', 0,
1509  '--match', options.filter,
1510  '--exclude', r'.*\.TMP\.log.*',
1511  '-postprocsteps', 'BCIDDefaultProcessing')
1512 
1513  sys.exit(0)
1514 
1515 #
1516 # Run BCID jobs for jobs completed at most MAXTIME ago
1517 #
1518 if cmd=='runBCIDJobs' and len(args)<3:
1519  if len(args)==2:
1520  earliestUpdateTime = time.time()-float(args[1])
1521  else:
1522  earliestUpdateTime = 0
1523 
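# --- Annotation: MAXTIME is a look-back window in seconds, e.g.
# --- 'beamspotman.py runBCIDJobs 86400' selects tasks updated within the last
# --- day (earliestUpdateTime = now - 86400); omitting it selects all tasks.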
1524  if not options.beamspottag:
1525  sys.exit('ERROR: No beam spot tag specified')
1526 
1527  print ('Running the following BCID tasks for tasks of type %s:\n' % options.runtaskname)
1528  print (' %-10s %s' % ('RUN','BCID TASK'))
1529  print (' %s' % (60*'-'))
1530 
1531  taskList = []
1532  with getTaskManager() as taskman:
1533  for t in taskman.taskIterDict('*',["where TASKNAME like '%s%%'" % options.runtaskname,
1534  "and UPDATED >= ", DbParam(earliestUpdateTime),
1535  "and COOLTAGS like '%%%s%%'" % options.beamspottag,
1536  "order by RUNNR desc"]):
1537  dsname = t['DSNAME']
1538  runnr = t['RUNNR']
1539  taskName = t['TASKNAME']
1540  datatag = taskName.split('.')[-1].split('_')[0]
1541  bcidTaskName = 'BCID.%s.%s' % (taskName,datatag)
1542 
1543  try:
1544  next(taskman.taskIterDict('*',['where RUNNR =',DbParam(runnr),'and DSNAME =',DbParam(dsname),'and TASKNAME =',DbParam(bcidTaskName),'order by UPDATED desc']))  # result unused; only existence matters
1545  print (' %-10s %s'% (runnr,bcidTaskName))
1546  except StopIteration:
1547  print (' * %-10s %s'% (runnr,'--- no BCID task found ---'))
1548  taskList.append(t)
1550 
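# --- Annotation: in the taskIterDict queries above, DbParam wraps values so
# --- they are bound as query parameters instead of being interpolated into the
# --- SQL text, e.g. (sketch):
#
#   taskman.taskIterDict('*', ['where RUNNR =', DbParam(runnr)])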
1551  if not taskList:
1552  print ('\nNo jobs need to be run.\n')
1553  sys.exit(0)
1554 
1555  if not options.batch:
1556  a = input('\nARE YOU SURE [n] ? ')
1557  if a!='y':
1558  sys.exit('ERROR: Running of BCID tasks aborted by user')
1559  print()
1560 
1561  oracle = getT0DbConnection()
1562  for t in taskList:
1563  dsname = t['DSNAME']
1564  runnr = t['RUNNR']
1565  ptag = dsname.split('.')[0]
1566  stream = dsname.split('.')[2]
1567  taskName = t['TASKNAME']
1568  fulldatatag = taskName.split('.')[-1]  # keep the full tag (all processing steps) for the merge check below
1569  datatag = taskName.split('.')[-1].split('_')[0]
1570  bcidTaskName = 'BCID.%s.%s' % (taskName,datatag)
1571 
1572  filter = 'AOD'
1573  if any(tok.startswith('m') for tok in fulldatatag.split('_')):  # merged dataset if any step token is a merge (m...) step
1574  t0dsname = '%s.merge.%s.%s%%' % (dsname, filter, datatag)
1575  else:
1576  t0dsname = '%s.recon.%s.%s%%' % (dsname, filter, datatag)
1577 
1578  c = getJobConfig('.',dsname,taskName)
1579  if 'ESD' in c['inputfiles'][0]:
1580  filter = 'ESD'
1581  t0dsname = '%s.recon.ESD.%s' % (dsname, datatag)
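# --- Annotation (hypothetical tag, for illustration): with fulldatatag
# --- 'f594_m1435' the merge check matches the 'm1435' token and the Tier-0
# --- pattern becomes '<dsname>.merge.AOD.f594%'; ESD inputs instead force the
# --- '<dsname>.recon.ESD.<datatag>' form.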
1582 
1583  print ('\nRunning BCID job for run %s:' % runnr)
1584 
1585  print ('... Querying T0 database for replication of %s' % t0dsname)
1586  cur = oracle.cursor()
1587  cur.execute("select DATASETNAME,PSTATES from DATASET where DATASETNAME like '%s' and PSTATES like '%%replicate:done%%'" % t0dsname)
1588  r = cur.fetchall()
1589  if not r:
1590  print (' WARNING: input data not yet replicated - please retry later')
1591  else:
1592  print (' ',r)
1593  print ('... Submitting BCID task')
1594  cmd = 'beamspotman.py -p %s -s %s -f \'.*\\.%s\\..*\' --bcidtaskname %s runBCID %i %s' % (ptag,stream,filter,'BCID.'+taskName,int(runnr),datatag)
1595  print (' %s' % cmd)
1596  sys.stdout.flush()
1597  status = os.system(cmd) >> 8 # Convert to standard Unix exit code
1598  if status:
1599  print ('\nERROR: Job submission returned error - exit code %i\n' % status)
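# --- Annotation: os.system() returns the raw wait status; the child's exit
# --- code sits in the high byte, hence '>> 8' above (a child exiting with
# --- code 2 yields a wait status of 512).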
1600 
1601  print()
1602  sys.exit(0)
1603 #
1604 # Create an sqlite file containing a MC (OFLP200) tag a la beamSpot_set.py
1605  # Need -- before positional arguments to deal with negative values
1606 # beamspotman.py -t IndetBeampos-7TeV-PeriodD-SmallWidth-001 mctag -- 0 0.1 1.1 -5 0.045 0.048 63 0.00043 -4e-05 9e-05
1607 #
1608  if cmd=='mctag' and 8<=len(args)<12:  # STATUS, 3 positions and 3 widths required; tilts/sigmaXY optional
1609 
1610  from InDetBeamSpotExample.COOLUtils import *
1611 
1612  if not options.beamspottag:
1613  sys.exit('ERROR: No beam spot tag specified')
1614 
1615  dbfile=options.beamspottag + '.db'
1616  folderHandle = openBeamSpotDbFile(dbfile, dbName = 'OFLP200', forceNew = True)
1617 
1618  runMin = options.runMin if options.runMin is not None else 0
1619  runMax = options.runMax if options.runMax is not None else (1 << 31)-1
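# --- Annotation: (1 << 31) - 1 == 2147483647 mirrors writeBeamSpotEntry's own
# --- runMax default and acts as an open-ended upper bound when --runMax is
# --- omitted.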
1620 
1621  writeBeamSpotEntry(folderHandle, tag=options.beamspottag,
1622  runMin=runMin, runMax=runMax,
1623  status=int(args[1]),
1624  posX=float(args[2]), posY=float(args[3]), posZ=float(args[4]),
1625  sigmaX=float(args[5]), sigmaY=float(args[6]), sigmaZ=float(args[7]),
1626  tiltX=float(args[8]) if len(args)>8 else 0.,
1627  tiltY=float(args[9]) if len(args)>9 else 0.,
1628  sigmaXY=float(args[10]) if len(args)>10 else 0.,
1629  posXErr=0., posYErr=0., posZErr=0.,
1630  sigmaXErr=0., sigmaYErr=0., sigmaZErr=0.,
1631  tiltXErr=0., tiltYErr=0.,
1632  sigmaXYErr=0.)
1633 
1634  print ('* MC beamspot tag written to db=OFLP200, tag=%s in %s ' %(options.beamspottag, dbfile))
1635  print (' - AtlCoolConsole.py "sqlite://;schema=' + dbfile + ';dbname=OFLP200"')
1636  print ('* To upload to oracle use:')
1637  print (' - beamspotman.py --srctag %s -t %s --srcdbname OFLP200 --destdbname OFLP200 upload %s' %(options.beamspottag, options.beamspottag, dbfile))
1638  sys.exit(0)
1639 
1640 #
1641 # Create an sqlite file containing a Data (CONDBR2) tag a la beamSpot_set.py
1642  # Need -- before positional arguments to deal with negative values
1643 # beamspotman.py -t IndetBeampos-7TeV-PeriodD-SmallWidth-001 maketag -- 0 0.1 1.1 -5 0.045 0.048 63 0.00043 -4e-05 9e-05
1644 #
1645  if cmd=='maketag' and 8<=len(args)<12:  # same argument rules as mctag
1646 
1647  from InDetBeamSpotExample.COOLUtils import *
1648 
1649  if not options.beamspottag:
1650  sys.exit('ERROR: No beam spot tag specified')
1651 
1652  dbfile=options.beamspottag + '.db'
1653  dbName=options.destdbname
1654  folderHandle = openBeamSpotDbFile(dbfile, dbName = options.destdbname, forceNew = True)
1655 
1656  runMin = options.runMin if options.runMin is not None else 0
1657  runMax = options.runMax if options.runMax is not None else (1 << 31)-1
1658 
1659  writeBeamSpotEntry(folderHandle, tag=options.beamspottag,
1660  runMin=runMin, runMax=runMax,
1661  status=int(args[1]),
1662  posX=float(args[2]), posY=float(args[3]), posZ=float(args[4]),
1663  sigmaX=float(args[5]), sigmaY=float(args[6]), sigmaZ=float(args[7]),
1664  tiltX=float(args[8]) if len(args)>8 else 0.,
1665  tiltY=float(args[9]) if len(args)>9 else 0.,
1666  sigmaXY=float(args[10]) if len(args)>10 else 0.,
1667  posXErr=0., posYErr=0., posZErr=0.,
1668  sigmaXErr=0., sigmaYErr=0., sigmaZErr=0.,
1669  tiltXErr=0., tiltYErr=0.,
1670  sigmaXYErr=0.)
1671 
1672  print ('* Beamspot tag written to db=%s, tag=%s in %s ' %(dbName,options.beamspottag, dbfile))
1673  print (' - AtlCoolConsole.py "sqlite://;schema=' + dbfile + ';dbname=%s"' %(dbName))
1674  print ('* To upload to oracle use:')
1675  print (' - beamspotman.py --srctag %s -t %s --srcdbname %s --destdbname %s upload %s' %(options.beamspottag, options.beamspottag, dbName, dbName, dbfile))
1676  print (' - /afs/cern.ch/user/a/atlcond/utils22/AtlCoolMerge.py --nomail %s %s ATLAS_COOLWRITE ATLAS_COOLOFL_INDET_W <passwd>' %(dbfile, dbName))
1677  sys.exit(0)
1678 
1679 
1680 print ('ERROR: Illegal command or number of arguments ({})'.format(' '.join(args)))
1681 sys.exit(1)