beamspotman is a command line utility to do typical beam spot related tasks.
__authors__ = ['Juerg Beringer', 'Carl Suster']
__version__ = 'beamspotman.py atlas/athena'
__usage__ = '''%prog [options] command [args ...]

Commands are:

show RUNNR                                For given run, show data on CAF and beam spot jobs
run RUNNR TAG                             Run standard beam spot reconstruction job
runCmd DSNAME TASKNAME CMD                Repeat command for matching tasks (use %(DSNAME)s and
                                          %(TASKNAME)s in the command string CMD to specify the
                                          dataset and task names, if needed)
runMon RUNNR TAG                          Run standard beam spot monitoring job
runMonJobs [MAXTIME]                      Run monitoring jobs for jobs completed at most MAXTIME ago
postproc                                  Default postprocessing for any task that needs it
postproc DSNAME TASKNAME [WHATLIST]       Postprocessing for a selected set of tasks
upload DBFILE                             Upload SQLite file into COOL (independent of task)
upload DSNAME TASKNAME                    Upload result of beam spot determination into COOL
dq2get DSNAME TASKNAME                    Retrieve task data from grid job (must have set up grid env.)
queryT0 DSNAME TASKNAME                   Query Tier-0 database about a task
backup DIR                                Backup directory DIR (may contain wildcards) to EOS
archive DSNAME TASKNAME                   Archive task data (deletes files after copying to EOS,
                                          sets on-disk status to ARCHIVED)
lsbackup                                  List contents of backup directory on EOS
reproc TEMPLATE DSNAME TASKNAME INPUTDIR  Run reprocessing task consisting of several jobs split into 5 LBs
                                          over the files in INPUTDIR
runaod TEMPLATE DSNAME TASKNAME INPUTDIR  Run over AOD, splitting jobs into sets of N LBs (similar
                                          to reproc command, except for variable params)
resubmit DSNAME TASKNAME                  Rerun jobs of a specific task (choose queue with -q QUEUE)
dqflag DBFILE                             Upload DQ SQLite file into COOL (independent of task)
dqflag DSNAME TASKNAME                    Upload result of beam spot DQ flag determination into COOL
runBCID RUNNR TAG                         Run standard beam spot BCID job
runBCIDJobs [MAXTIME]                     Run BCID jobs for jobs completed at most MAXTIME ago
mctag STATUS POSX POSY POSZ               Create an sqlite file containing a MC tag with the
      SIGMAX SIGMAY SIGMAZ                parameters given (and zero errors). A la beamSpot_set.py.
'''
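# Illustrative examples of typical invocations (run numbers, tags, and
# dataset/task names below are placeholders, not real data):
#
#   beamspotman.py show 331804
#   beamspotman.py -t IndetBeampos-ES1-UPD2 run 331804 f857
#   beamspotman.py -t IndetBeampos-ES1-UPD2 runMon 331804 f857
#   beamspotman.py postproc DSNAME TASKNAME
#   beamspotman.py upload DSNAME TASKNAME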
proddir = '/afs/cern.ch/user/a/atlidbs/jobs'
produserfile = '/afs/cern.ch/user/a/atlidbs/private/produsers.dat'
prodcoolpasswdfile = '/afs/cern.ch/user/a/atlidbs/private/coolinfo.dat'
flaskcoolpasswdfile = '/afs/cern.ch/user/a/atlidbs/private/flaskinfo.dat'
proddqcoolpasswdfile = '/afs/cern.ch/user/a/atlidbs/private/cooldqinfo.dat'
tier0dbinfofile = '/afs/cern.ch/user/a/atlidbs/private/t0dbinfo.dat'
backuppath = '/eos/atlas/atlascerngroupdisk/phys-beamspot/jobs/backup'
archivepath = '/eos/atlas/atlascerngroupdisk/phys-beamspot/jobs/archive'
import sys
import os
import re
import time
import glob
import subprocess
from copy import copy

from InDetBeamSpotExample.PostProcessing import doPostProcessing
from InDetBeamSpotExample import BeamSpotPostProcessing
from InDetBeamSpotExample import COOLUtils
from InDetBeamSpotExample import DiskUtils
from InDetBeamSpotExample.TaskManager import *

from optparse import Option, OptionParser, OptionGroup
def check_commsep(option, opt, value):
    return re.split(r'\s*,\s*|\s+', value)

class BeamSpotOption(Option):
    TYPES = Option.TYPES + ('commsep',)
    TYPE_CHECKER = copy(Option.TYPE_CHECKER)
    TYPE_CHECKER['commsep'] = check_commsep
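# A minimal sketch of what the 'commsep' option type accepts: values separated
# by commas, whitespace, or both are equivalent, e.g. both of the following
# return ['JobPostProcessing', 'AveBeamSpot']:
#
#   check_commsep(None, '--postprocsteps', 'JobPostProcessing,AveBeamSpot')
#   check_commsep(None, '--postprocsteps', 'JobPostProcessing AveBeamSpot')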
parser = OptionParser(usage=__usage__, version=__version__, option_class=BeamSpotOption)
g_input = OptionGroup(parser, 'Input file options')
g_input.add_option('', '--eos', dest='eos', default=False, action='store_true', help='access files over EOS (not needed if /eos is mounted)')
g_input.add_option('-e', '--eospath', dest='eospath', default='/eos/atlas/atlastier0/rucio', help='eos path (excluding project and stream name)')
g_input.add_option('-p', '--project', dest='project', default='data17_13TeV', help='project name')
g_input.add_option('-s', '--stream', dest='stream', default='calibration_BeamSpot', help='stream name')
g_input.add_option('-f', '--filter', dest='filter', default=r'.*\.AOD\..*', help='regular expression to filter input files')
g_input.add_option('', '--lbfilemap', dest='lbfilemap', default='', help='text file with mapping between filename and lumi blocks')
g_input.add_option('', '--rucio', dest='rucio', action='store_true', default=False, help='rucio directory structure')
g_input.add_option('', '--dpdinput', dest='dpdinput', action='store_true', default=False, help='Run over DPD for runaod')
g_input.add_option('-l', '--filelist', dest='filelist', default=None, help='Explicit list of files for reproc command')
g_input.add_option('', '--removedups', dest='removedups', action='store_true', default=False, help='Remove duplicate retry files of form root.N, keeping the latest')
parser.add_option_group(g_input)
g_mode = OptionGroup(parser, 'Mode flags')
g_mode.add_option('-b', '--batch', dest='batch', action='store_true', default=False, help='batch mode - never ask for confirmation')
g_mode.add_option('', '--test', dest='testonly', action='store_true', default=False, help='for runaod, show only options and input files')
g_mode.add_option('', '--expertmode', dest='expertmode', action='store_true', default=False, help='expert mode (BE VERY CAREFUL)')
g_mode.add_option('', '--ignoremode', dest='ignoremode', default='', help='ignore update protection mode (needs expert pwd)')
parser.add_option_group(g_mode)
parser.add_option('-t', '--beamspottag', dest='beamspottag', default=beamspottag, help='beam spot tag')
parser.add_option('-d', '--dbconn', dest='dbconn', default='', help='task manager database connection string (default: check TASKDB, otherwise use sqlite_file:taskdata.db)')
parser.add_option('', '--proddir', dest='proddir', default=proddir, help='production directory (default: %s)' % proddir)
parser.add_option('', '--destdbname', dest='destdbname', default='CONDBR2', help='destination database instance name (default: CONDBR2)')
parser.add_option('', '--srcdbname', dest='srcdbname', default='BEAMSPOT', help='source database instance name (default: BEAMSPOT)')
parser.add_option('', '--srctag', dest='srctag', default='nominal', help='source tag (default: nominal)')
parser.add_option('-r', '--runjoboptions', dest='runjoboptions', default='InDetBeamSpotExample/VertexTemplate.py', help='template to run beam spot jobs')
parser.add_option('', '--runtaskname', dest='runtaskname', default='VTX', help='task name')
parser.add_option('-m', '--monjoboptions', dest='monjoboptions', default='runBeamSpotMonitor.py', help='template to run monitoring jobs')
parser.add_option('', '--bcidjoboptions', dest='bcidjoboptions', default='InDetBeamSpotExample/VertexTemplate.py', help='template to run BCID jobs')
parser.add_option('', '--montaskname', dest='montaskname', default='MON', help='task name')
parser.add_option('', '--bcidtaskname', dest='bcidtaskname', default='BCID', help='BCID task name')
parser.add_option('-g', '--griduser', dest='griduser', default='user10.JuergBeringer', help='grid user name prefix (e.g. user10.JuergBeringer)')
parser.add_option('-n', '--nowildcards', dest='nowildcards', action='store_true', default=False, help='do not add wildcards when looking up dataset and task names')
parser.add_option('-x', '--excludeiftask', dest='excludeiftask', default='', help='Exclude running cmd with runCmd if such task exists already')
parser.add_option('', '--excludeds', dest='excludeds', default='', help='Exclude running cmd with runCmd for dataset containing a certain string')
parser.add_option('', '--archive', dest='archive', action='store_true', default=False, help='archive, i.e. delete data after backup')
parser.add_option('', '--incremental', dest='incremental', action='store_true', default=False, help='incremental backup')
parser.add_option('', '--lbperjob', dest='lbperjob', type='int', default=5, help='number of luminosity blocks per job (default: 5)')
parser.add_option('', '--params', dest='params', default='', help='job option parameters to pass to job option template')
parser.add_option('-z', '--postprocsteps', dest='postprocsteps', type='commsep', default=['JobPostProcessing'], help='Task-level postprocessing steps, comma-separated (Default: JobPostProcessing)')
parser.add_option('', '--srcdqtag', dest='srcdqtag', default='nominal', help='source DQ tag (default: nominal)')
parser.add_option('', '--dqtag', dest='dqtag', default='HEAD', help='beam spot DQ tag')
parser.add_option('', '--destdqdbname', dest='destdqdbname', default='CONDBR2', help='DQ destination database instance name (default: CONDBR2)')
parser.add_option('', '--srcdqdbname', dest='srcdqdbname', default='IDBSDQ', help='DQ source database instance name (default: IDBSDQ)')
parser.add_option('', '--dslist', dest='dslist', default='', help='Exclude running cmd with runCmd if the dataset is not in this file')
parser.add_option('', '--nominbias', dest='nominbias', action='store_true', default=False, help='overwrite MinBias_physics in DSNAME with express_express')
parser.add_option('', '--resultsondisk', dest='resultsondisk', action='store_true', default=False, help='Leave the results on disk when archiving')
parser.add_option('', '--pLbFile', dest='pseudoLbFile', default=None, help='File for pseudo LB info from scan')
parser.add_option('', '--prefix', dest='prefix', default='', help='Prefix for reading files from mass storage (default: determined from filename)')
parser.add_option('', '--rl', dest='runMin', type='int', default=None, help='Minimum run number for mctag (inclusive)')
parser.add_option('', '--ru', dest='runMax', type='int', default=None, help='Maximum run number for mctag (inclusive)')
parser.add_option('', '--useRun', dest='useRun', type='int', default=None, help='Run monitoring job for a given run only')
parser.add_option('', '--noCheckAcqFlag', dest='noCheckAcqFlag', action='store_true', default=False, help='Don\'t check acqFlag when submitting VdM jobs')
parser.add_option('', '--resubAll', dest='resubAll', action='store_true', default=False, help='Resubmit all jobs irrespective of status')
parser.add_option('', '--resubRunning', dest='resubRunning', action='store_true', default=False, help='Resubmit all "running" jobs')
parser.add_option('-q', '--queue', dest='batch_queue', default=None, help='Name of batch queue to use (default is context-specific)')
g_deprecated = OptionGroup(parser, 'Deprecated Options')
g_deprecated.add_option('', '--mon', dest='legacy_mon', action='store_true', default=False, help='mon directory structure (now inferred from montaskname)')
parser.add_option_group(g_deprecated)
(options, args) = parser.parse_args()
if len(args) < 1:
    parser.error('wrong number of command line arguments')

cmd = args[0]
cmdargs = args[1:]
if not options.expertmode:
    if os.path.realpath(os.getcwd()) != os.path.realpath(options.proddir):
        sys.exit('ERROR: You must run this command in the production directory %s' % options.proddir)
    if not os.path.exists(produserfile):
        sys.exit('ERROR: Authorization file unreadable or does not exist: %s' % produserfile)
    if not subprocess.getoutput('grep `whoami` %s' % produserfile):
        sys.exit('ERROR: You are not authorized to run this command (user name must be listed in produser file %s)' % produserfile)
else:
    if os.path.realpath(os.getcwd()) != os.path.realpath(options.proddir):
        print ('WARNING: You are not running in the production directory %s' % options.proddir)
def getT0DbConnection():
    try:
        with open(tier0dbinfofile, 'r') as dbinfofile:
            connstring = dbinfofile.read().strip()
    except:
        sys.exit('ERROR: Unable to read connection information for Tier-0 database')
    dbtype, dbname = connstring.split(':', 1)
    if dbtype != 'oracle':
        sys.exit('ERROR: Invalid T0 connection string')
    import cx_Oracle
    try:
        oracle = cx_Oracle.connect(dbname)
    except:
        print ('ERROR: First connection attempt to Tier-0 Oracle database failed; will retry in 10s ...')
        time.sleep(10)
        try:
            oracle = cx_Oracle.connect(dbname)
        except Exception as e:
            print (e)
            sys.exit('ERROR: Unable to connect to Tier-0 Oracle database')
    if not oracle:
        sys.exit('ERROR: Unable to connect to Tier-0 Oracle database (invalid cx_Oracle connection)')
    return oracle
def getTaskManager():
    ''' Open task manager (used by all following commands, at the very least through subcommands) '''
    try:
        return TaskManager(options.dbconn)
    except:
        print ('ERROR: Unable to access task manager database %s' % options.dbconn)
        sys.exit(1)
def fail(message):
    print ('ERROR:', message)
    sys.exit(1)
def dataset_from_run_and_tag(run, tag):
    ''' Given a run number and tag, check input dataset and work out name. '''
    fs = DiskUtils.FileSet.from_ds_info(run,
            project=options.project,
            stream=options.stream,
            base=options.eospath)
    datasets = list(fs
            .use_files_from(options.filelist)
            .matching(options.filter + tag + '.*')
            .excluding(r'.*\.TMP\.log.*')
            .only_single_dataset())
    dataset = os.path.dirname(datasets[0])
    dsname = '.'.join(os.path.basename(datasets[0]).split('.')[:3])
    return (dataset, dsname)
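# Illustrative example of the name derivation above (the dataset path is
# hypothetical): for
#   datasets[0] = '.../data17_13TeV.00331804.calibration_BeamSpot.recon.AOD.f857'
# dataset is the parent directory and dsname keeps the first three
# dot-separated fields: 'data17_13TeV.00331804.calibration_BeamSpot'.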
def run_jobs(script, ds_name, task_name, params, *args):
    ''' Invoke runJobs.py '''
    arg_list = ['runJobs']
    arg_list.extend(map(str, args))
    if params:
        param_args = []
        for k, v in params.items():
            param_args.append("{}={}".format(k, repr(v)))
        arg_list.extend(['--params', ', '.join(param_args)])
    if options.testonly:
        arg_list.append('--test')
    arg_list.extend([script, ds_name, task_name])

    print (subprocess.list2cmdline(arg_list))
    subprocess.check_call(arg_list)
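# For example (dataset and task names are placeholders), a call like
#   run_jobs('runBeamSpotMonitor.py', 'DSNAME', 'MON.f857',
#            {'useBeamSpot': True}, '--lbperjob', 10)
# prints and then executes:
#   runJobs --lbperjob 10 --params useBeamSpot=True runBeamSpotMonitor.py DSNAME MON.f857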
if cmd == 'upload' and len(cmdargs) == 1:
    dbfile = cmdargs[0]
    if not options.beamspottag:
        fail('No beam spot tag specified')
    try:
        with open(prodcoolpasswdfile, 'r') as passwdfile:
            passwd = passwdfile.read().strip()
    except:
        fail('Unable to determine COOL upload password')
    try:
        with open(flaskcoolpasswdfile, 'r') as flaskpasswdfile:
            flaskpasswd = flaskpasswdfile.read().strip()
    except:
        fail('Unable to determine FLASK upload password')

    print ('Beam spot file:   ', dbfile)
    print ('Uploading to tag: ', options.beamspottag)
    os.system('dumpBeamSpot.py -d %s -t %s %s' % (options.srcdbname, options.srctag, dbfile))

    stat = os.system('/afs/cern.ch/user/a/atlcond/utilsproxy/AtlCoolMerge.py --flask --nobackup --client_id cool-flask-beamspot-client --client_secret %s --nomail %s %s --folder /Indet/Beampos --tag %s --retag %s --destdb %s %s %s ATONR_COOLOFL_GPN ATLAS_COOLOFL_INDET_W %s' % (
        flaskpasswd,
        '--batch' if options.batch else '',
        ('--ignoremode %s' % options.ignoremode) if options.ignoremode else '',
        options.srctag,
        options.beamspottag,
        options.destdbname,
        dbfile,
        options.srcdbname,
        passwd))

    if stat:
        fail("UPLOADING TO COOL FAILED - PLEASE CHECK CAREFULLY!")
    sys.exit(0)
if cmd == 'run' and len(args) == 3:
    run = args[1]
    tag = args[2]
    dataset, dsname = dataset_from_run_and_tag(run, tag)

    run_jobs(options.runjoboptions, dsname,
             '{}-LR5.{}'.format(options.runtaskname, tag),
             {
                 'LumiRange' : 5,
             },
             '--files-per-job', 0,
             '--match', options.filter,
             '--exclude', r'.*\.TMP\.log.*',
             '--directory', dataset)
    sys.exit(0)
if cmd == 'runMon' and len(args) == 3:
    run = args[1]
    tag = args[2]
    if not options.beamspottag:
        fail('No beam spot tag specified')
    dataset, dsname = dataset_from_run_and_tag(run, tag)

    if 'ESD' in options.filter:
        run_jobs(options.monjoboptions, dsname,
                 '{}.{}'.format(options.montaskname, tag),
                 {
                     'cmdjobpreprocessing' : 'export STAGE_SVCCLASS=atlcal; export ATLAS_LOCAL_ROOT_BASE=/cvmfs/atlas.cern.ch/repo/ATLASLocalRootBase',
                     'useBeamSpot' : True,
                     'beamspottag' : options.beamspottag,
                 },
                 '--lbperjob', 10,
                 '--match', options.filter,
                 '--exclude', r'.*\.TMP\.log.*',
                 '--directory', dataset)
    elif options.filelist != None:
        lbinfoinfiles = True
        for line in open(options.filelist, 'r'):
            if "lb" not in line:
                lbinfoinfiles = False
                break
        lboptions = '--lbperjob=10' if lbinfoinfiles else '--files-per-job=10'
        run_jobs(options.monjoboptions, dsname, '{}.{}'.format(options.montaskname, tag),
                 {
                     'tracksAlreadyOnBeamLine' : True,
                     'useBeamSpot' : True,
                     'beamspottag' : options.beamspottag,
                 },
                 lboptions,
                 '--match', options.filter,
                 '--exclude', r'.*\.TMP\.log.*',
                 '--directory', dataset,
                 '--queue', '"tomorrow"')
    else:
        queue = options.batch_queue or '"tomorrow"'
        print ('Queue: ', queue)
        params = {
            'cmdjobpreprocessing' : 'export STAGE_SVCCLASS=atlcal; export ATLAS_LOCAL_ROOT_BASE=/cvmfs/atlas.cern.ch/repo/ATLASLocalRootBase',
            'useBeamSpot' : True,
            'beamspottag' : options.beamspottag,
        }
        if options.params:
            for s in options.params.split(', '):
                try:
                    p = s.split('=', 1)
                    params[p[0].strip()] = eval(p[1].strip())
                except:
                    print ('\nERROR parsing user parameter', p, '- parameter will be ignored')
        run_jobs(options.monjoboptions, dsname, '{}.{}'.format(options.montaskname, tag), params,
                 '--lbperjob', 10,
                 '--match', options.filter,
                 '--exclude', r'.*\.TMP\.log.*',
                 '--directory', dataset,
                 '--queue', queue)

    sys.exit(0)
if cmd == 'backup' and len(args) == 2:
    if options.archive and options.incremental:
        sys.exit('ERROR: Cannot do incremental archiving')
    tmplist = glob.glob(args[1]+'/')
    dirlist = []
    for dslash in tmplist:
        d = dslash[:-1]
        if options.incremental:
            baklog = d+'/backup.log'
            if os.path.exists(baklog):
                cmd = 'find %s -maxdepth 1 -newer %s' % (d, baklog)
                (status, output) = subprocess.getstatusoutput(cmd)
                if status:
                    sys.exit("ERROR: Error executing %s" % cmd)
                if output:
                    dirlist.append(d)
            else:
                dirlist.append(d)
        else:
            dirlist.append(d)

    print ('\nFound %i directories for backup:\n' % len(dirlist))
    for d in dirlist:
        print ('  %s' % d)
    if options.archive:
        print ('\n****************************************************************')
        print ('WARNING: ARCHIVING - DIRECTORIES WILL BE DELETED AFTER BACKUP!!!')
        print ('****************************************************************')
    if not options.batch:
        a = input('\nARE YOU SURE [n] ? ')
        if a != 'y':
            sys.exit('ERROR: Aborted by user')

    for d in dirlist:
        tmpdir = '/tmp'
        if options.archive:
            outname = d.replace('/', '-')+time.strftime('-%G_%m_%d.tar.gz')
            path = archivepath
        else:
            outname = d.replace('/', '-')+'.tar.gz'
            path = backuppath

        print ('\nBacking up %s --> %s/%s ...\n' % (d, path, outname))

        status = os.system('tar czf %s/%s %s' % (tmpdir, outname, d)) >> 8
        if status:
            sys.exit('\nERROR: Unable to create local tar file %s/%s' % (tmpdir, outname))

        status = os.system('xrdcp -f %s/%s root://eosatlas.cern.ch/%s/%s' % (tmpdir, outname, path, outname)) >> 8
        if status:
            os.system('rm -rf %s/%s' % (path, outname))
            status = os.system('xrdcp -f %s/%s root://eosatlas.cern.ch/%s/%s' % (tmpdir, outname, path, outname)) >> 8
            if status:
                print ('\nERROR: Unable to copy file to EOS to %s/%s' % (path, outname))
                sys.exit(1)

        os.system('rm %s/%s' % (tmpdir, outname))
        status = os.system('echo "`date` %s/%s" >> %s/backup.log' % (path, outname, d)) >> 8
        if status:
            sys.exit('\nERROR: Could not update backup log file')

        if options.archive:
            print ('\nWARNING: DELETION OF SOURCE FILES NOT YET IMPLEMENTED\n')
    sys.exit(0)
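# Sketch of the incremental selection above: a directory d with an existing
# backup.log is backed up again only if
#   find d -maxdepth 1 -newer d/backup.log
# produces output, i.e. something in d changed since the last recorded backup.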
if cmd == 'lsbackup' and not cmdargs:
    path = archivepath if options.archive else backuppath
    print ('Backup directory:', path)
    for f in DiskUtils.from_directory(path):
        print (f)
    sys.exit(0)
if cmd == 'show' and len(cmdargs) == 1:
    run = cmdargs[0]

    print ('Base path:    ', options.eospath)
    print ('Project tag:  ', options.project)
    print ('Stream:       ', options.stream)
    print ()
    print ('Files available (filtered by: {}):'.format(options.filter))
    print ('---------------')

    fs = DiskUtils.FileSet.from_ds_info(run,
            project=options.project,
            stream=options.stream,
            base=options.eospath)
    for f in fs.matching(options.filter):
        print (f)

    print ()
    print ('Beam spot tasks:')
    print ('---------------')
    with getTaskManager() as taskman:
        for t in taskman.taskIterDict(qual=["where DSNAME like '%%%s%%' order by UPDATED" % run]):
            print ('%-45s %-45s %4s job(s), last update %s' % (t['DSNAME'], t['TASKNAME'], t['NJOBS'], time.ctime(t['UPDATED'])))
    sys.exit(0)
if cmd == 'postproc' and not cmdargs:
    with getTaskManager() as taskman:
        for t in taskman.taskIterDict(
                qual=['where STATUS > %i and STATUS <= %i order by UPDATED' % (
                    TaskManager.StatusCodes['RUNNING'],
                    TaskManager.StatusCodes['POSTPROCESSING'])]):
            doPostProcessing(taskman, t, t['TASKPOSTPROCSTEPS'].split(), BeamSpotPostProcessing)
    sys.exit(0)
if cmd == 'postproc' and len(cmdargs) in [2, 3]:
    dsname = cmdargs[0]
    taskname = cmdargs[1]

    steps = options.postprocsteps if len(cmdargs) < 3 else cmdargs[2].split(',')
    if steps:
        print ('Executing postprocessing tasks:', steps)
    else:
        print ('Executing postprocessing tasks as specified in task database')

    with getTaskManager() as taskman:
        try:
            taskList = getFullTaskNames(taskman,
                                        dsname,
                                        taskname,
                                        confirmWithUser=not options.batch,
                                        addWildCards=not options.nowildcards)
        except TaskManagerCheckError as e:
            print (e)
            sys.exit(1)
        for taskName in taskList:
            t = taskman.getTaskDict(taskName[0], taskName[1])
            if steps:
                doPostProcessing(taskman, t, steps, BeamSpotPostProcessing, forceRun=True)
            else:
                doPostProcessing(taskman, t, t['TASKPOSTPROCSTEPS'].split(), BeamSpotPostProcessing, forceRun=True)
    sys.exit(0)
if cmd == 'upload' and len(args) == 3:
    with getTaskManager() as taskman:
        try:
            [(dsname, task)] = getFullTaskNames(taskman, args[1], args[2], requireSingleTask=True, confirmWithUser=not options.batch, addWildCards=not options.nowildcards)
        except TaskManagerCheckError as e:
            fail(e)
        if not options.beamspottag:
            sys.exit('ERROR: No beam spot tag specified')

        dbfile = glob.glob('%s/%s/*-beamspot.db' % (dsname, task))
        if len(dbfile) != 1:
            print ('ERROR: Missing or ambiguous input COOL file:', dbfile)
            sys.exit()

        try:
            with open(prodcoolpasswdfile, 'r') as passwdfile:
                passwd = passwdfile.read().strip()
        except:
            sys.exit('ERROR: Unable to determine COOL upload password')

        try:
            with open(flaskcoolpasswdfile, 'r') as flaskpasswdfile:
                flaskpasswd = flaskpasswdfile.read().strip()
        except:
            fail('Unable to determine FLASK upload password')

        print ('\nData set:         ', dsname)
        print ('Beam spot file:   ', dbfile[0])
        print ('Uploading to tag: ', options.beamspottag)
        os.system('dumpBeamSpot.py -d %s -t %s %s' % (options.srcdbname, options.srctag, dbfile[0]))

        if options.ignoremode:
            ignoremode = '--passopt="--appendlocked --ignoremode %s"' % options.ignoremode
        else:
            ignoremode = ''
        if options.batch:
            batchmode = '--batch'
        else:
            batchmode = ''

        print ('command: /afs/cern.ch/user/a/atlcond/utilsproxy/AtlCoolMerge.py --flask --nobackup --client_id cool-flask-beamspot-client --client_secret <flaskpassword> --nomail %s %s --folder /Indet/Beampos --tag %s --retag %s --destdb %s %s %s ATONR_COOLOFL_GPN ATLAS_COOLOFL_INDET_W <password>' % (batchmode, ignoremode, options.srctag, options.beamspottag, options.destdbname, dbfile[0], options.srcdbname))
        stat = os.system('/afs/cern.ch/user/a/atlcond/utilsproxy/AtlCoolMerge.py --flask --nobackup --client_id cool-flask-beamspot-client --client_secret %s --nomail %s %s --folder /Indet/Beampos --tag %s --retag %s --destdb %s %s %s ATONR_COOLOFL_GPN ATLAS_COOLOFL_INDET_W %s' % (flaskpasswd, batchmode, ignoremode, options.srctag, options.beamspottag, options.destdbname, dbfile[0], options.srcdbname, passwd))

        if stat:
            print ("\n\nERROR: UPLOADING TO COOL FAILED - PLEASE CHECK CAREFULLY!\n\n")
            sys.exit(1)

        # Record the upload in a flag file next to the COOL file
        uploadflag = dbfile[0]+'.uploaded'
        stat = os.system('echo "`date` %s" >> %s' % (options.beamspottag, uploadflag))
        if stat:
            print ("ERROR: Uploading was successful, but unable to set upload flag", uploadflag)

        cooltags = options.beamspottag

        # If the Next beam spot tag resolves to a different UPD tag,
        # additionally record the upload for that tag
        try:
            nextbeamspot = COOLUtils.resolveNextBeamSpotFolder()
        except:
            nextbeamspot = ''
        if nextbeamspot == options.beamspottag:
            nextbeamspot = ''
        if not ('UPD' in options.beamspottag and 'UPD' in nextbeamspot):
            nextbeamspot = ''
        if nextbeamspot != '':
            print ('Additionally uploading to Next tag: ', nextbeamspot)
            cooltags = appendUnique(cooltags, nextbeamspot)

        # Record the tag(s) in the task database
        t = taskman.getTaskDict(dsname, task)
        taskman.setValue(dsname, task, 'COOLTAGS', appendUnique(t['COOLTAGS'], cooltags))

    sys.exit(0)
if cmd == 'dq2get' and len(args) == 3:
    try:
        with getTaskManager() as taskman:
            [(dsname, task)] = getFullTaskNames(taskman, args[1], args[2], requireSingleTask=True, addWildCards=not options.nowildcards)
    except TaskManagerCheckError as e:
        fail(e)
    dir = os.path.join(dsname, task)
    griddsname = '%s.%s-%s' % (options.griduser, dsname, task)
    path = os.path.join(dir, griddsname)
    if os.path.exists(path):
        print ('ERROR: Path exists already:', path)
        sys.exit()
    print ('Extracting into %s/%s ...' % (dir, griddsname))
    stat = os.system('cd %s; dq2-get -T 6,6 %s' % (dir, griddsname))
    if stat:
        print ("ERROR: Problem occurred while extracting data set - task status not changed")

    statfile = glob.glob('%s/000/*.status.SUBMITTED' % (dir))
    if len(statfile) != 1:
        print ("ERROR: Unable to uniquely identifying status - giving up. Candidate status files are:")
        print (statfile)
        sys.exit()
    os.system('rm %s' % statfile[0])
    basename = statfile[0][:-17]
    os.system('touch %s.exit.0' % basename)
    os.system('touch %s.COMPLETED' % basename)
    print ("\nSuccessfully downloaded data set", griddsname)
    os.system('du -hs %s' % path)
    sys.exit(0)
if cmd == 'queryT0' and len(args) == 3:
    try:
        with getTaskManager() as taskman:
            [(dsname, task)] = getFullTaskNames(taskman, args[1], args[2], requireSingleTask=True, addWildCards=not options.nowildcards)
    except TaskManagerCheckError as e:
        fail(e)
    tags = task.split('.')[-1]
    if 'ESD' in options.filter:
        t0TaskName = '%s.recon.ESD.%s.beamspotproc.task' % (dsname, tags)
    else:
        if any(t[0] == 'm' for t in tags.split('_')):
            t0TaskName = '%s.merge.AOD.%s.beamspotproc.task' % (dsname, tags)
        else:
            t0TaskName = '%s.recon.AOD.%s.beamspotproc.task' % (dsname, tags)

    print ('Querying Tier-0 database for task', t0TaskName, '...')
    oracle = getT0DbConnection()
    cur = oracle.cursor()
    try:
        sql = str("SELECT status FROM tasks WHERE taskname='%s' AND tasktype='beamspotproc'" % t0TaskName)
        cur.execute(sql)
        r = cur.fetchall()
    except Exception as e:
        print (e)
        sys.exit('ERROR: Unable to retrieve status of task %s' % t0TaskName)
    if len(r) == 0:
        sys.exit('ERROR: No such task found: %s' % t0TaskName)
    if len(r) > 1:
        sys.exit('ERROR: %i tasks found - unable to determine task status' % len(r))
    status = r[0][0]
    print ('\nTask status = %s\n' % status)
    if status == 'FINISHED' or status == 'TRUNCATED':
        sys.exit(0)
    else:
        sys.exit(2)
if cmd == 'runCmd' and len(args) == 4:
    dssel = args[1]
    tasksel = args[2]
    cmd = args[3]
    if options.nowildcards:
        qual = ["where DSNAME like '%s' and TASKNAME like '%s'" % (dssel, tasksel)]
    else:
        qual = ["where DSNAME like '%%%s%%' and TASKNAME like '%%%s%%'" % (dssel, tasksel)]
    if options.beamspottag:
        qual.append("and COOLTAGS like '%%%s%%'" % options.beamspottag)
    qual.append("order by RUNNR")
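    # For example (selectors are placeholders): without -n/--nowildcards,
    # dssel='data17' and tasksel='MON' with a beam spot tag set yield
    #   ["where DSNAME like '%data17%' and TASKNAME like '%MON%'",
    #    "and COOLTAGS like '%<beamspottag>%'",
    #    "order by RUNNR"]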
    print ('Running command\n')
    print ('  ', cmd % ({'DSNAME': 'DSNAME', 'TASKNAME': 'TASKNAME', 'RUNNR': 'RUNNR', 'FULLDSNAME': 'FULLDSNAME'}))
    print ('\nover the following datasets / tasks:\n')
    print ('  %-10s  %-40s  %s' % ('Run', 'Dataset', 'Task'))
    print ('  %s' % (74*'-'))

    taskList = []
    with getTaskManager() as taskman:
        for t in taskman.taskIterDict('*', qual):
            dsname = t['DSNAME']

            # Deal with express vs. minBias streams if requested
            if options.nominbias and 'physics_MinBias' in dsname:
                print ('Warning: changing physics_MinBias in dsname to express_express')
                dsname = dsname.replace('physics_MinBias', 'express_express')
                t['DSNAME'] = dsname

            runnr = t['RUNNR']
            taskname = t['TASKNAME']
            print ('  %-10s  %-40s  %s' % (runnr, dsname, taskname))

            if options.excludeiftask:
                if options.nowildcards:
                    # Exact match on DSNAME, wildcards only on TASKNAME
                    n = taskman.getNTasks(["where DSNAME ='%s' and TASKNAME like '%s'" % (dsname, options.excludeiftask)])
                else:
                    n = taskman.getNTasks(["where DSNAME ='%s' and TASKNAME like '%%%s%%'" % (dsname, options.excludeiftask)])
                if n != 0:
                    print ('    ==> SKIPPING - %i matching tasks found' % n)
                    continue

            if options.excludeds:
                excludeList = options.excludeds.split(',')
                if any(s in dsname for s in excludeList):
                    print ('    ==> SKIPPING dataset')
                    continue

            if options.dslist:
                stat, out = subprocess.getstatusoutput('grep -c %s %s' % (dsname, options.dslist))
                if stat != 0 or int(out) == 0:
                    print ('    ==> SKIPPING - dataset %s not found in %s' % (dsname, options.dslist))
                    continue
                # Store the full dataset name found in the list file
                stat, out = subprocess.getstatusoutput('grep %s %s' % (dsname, options.dslist))
                if stat == 0:
                    t['FULLDSNAME'] = out.strip()

            taskList.append(t)

    if not taskList:
        print ('\nNo jobs need to be run.\n')
        sys.exit(0)

    print ('\n%i datasets / tasks selected.\n' % len(taskList))
    if not options.batch:
        a = input('\nARE YOU SURE [n] ? ')
        if a != 'y':
            sys.exit('ERROR: Running of monitoring tasks aborted by user')

    for t in taskList:
        fullcmd = cmd % t
        print ('\nExecuting: ', fullcmd, ' ...')
        sys.stdout.flush()
        os.system(fullcmd)

    sys.exit(0)
if cmd == 'runMonJobs' and len(args) < 3:
    if len(args) == 2:
        earliestUpdateTime = time.time()-float(args[1])
    else:
        earliestUpdateTime = 0
    if not options.beamspottag:
        sys.exit('ERROR: No beam spot tag specified')

    print ('Running the following monitoring tasks for tasks of type %s:\n' % options.runtaskname)
    print ('       %-10s  %s' % ('RUN', 'MONITORING TASK'))
    print ('    %s' % (60*'-'))

    taskList = []
    with getTaskManager() as taskman:
        onDiskCode = TaskManager.OnDiskCodes['ALLONDISK']
        for t in taskman.taskIterDict('*', ["where TASKNAME like '%s%%'" % options.runtaskname,
                                            "and UPDATED >= ", DbParam(earliestUpdateTime),
                                            "and ONDISK = ", DbParam(onDiskCode),
                                            "and COOLTAGS like '%%%s%%'" % options.beamspottag,
                                            "order by RUNNR desc"]):
            dsname = t['DSNAME']
            runnr = t['RUNNR']
            taskName = t['TASKNAME']
            datatag = taskName.split('.')[-1].split('_')[0]
            monTaskName = 'MON.%s.%s' % (taskName, datatag)

            # With --useRun, consider only the selected run
            if options.useRun is not None and runnr != options.useRun:
                continue

            try:
                m = next(taskman.taskIterDict('*', ["where RUNNR =", DbParam(runnr),
                                                    "and DSNAME =", DbParam(dsname),
                                                    "and TASKNAME =", DbParam(monTaskName),
                                                    "order by UPDATED desc"]))
                print ('       %-10s  %s %s' % (runnr, dsname, monTaskName))
            except StopIteration:
                print ('    *  %-10s  %s %s' % (runnr, dsname, '--- no monitoring task found ---'))
                taskList.append(t)

    if not taskList:
        print ('\nNo jobs need to be run.\n')
        sys.exit(0)

    if not options.batch:
        a = input('\nARE YOU SURE [n] ? ')
        if a != 'y':
            sys.exit('ERROR: Running of monitoring tasks aborted by user')
        print ()

    oracle = getT0DbConnection()
    for t in taskList:
        dsname = t['DSNAME']
        runnr = t['RUNNR']
        ptag = dsname.split('.')[0]
        stream = dsname.split('.')[2]
        taskName = t['TASKNAME']
        fulldatatag = taskName.split('.')[-1]
        datatag = fulldatatag.split('_')[0]
        monTaskName = 'MON.%s' % (taskName)

        # Determine the beam spot tag the task was uploaded to
        cooltags = t['COOLTAGS']
        if not cooltags:
            cooltags = ''
        bstag = cooltags.split()[0]

        # Choose between merged and unmerged AOD (or ESD) input
        filter = 'AOD'
        if any(t[0] == 'm' for t in fulldatatag.split('_')):
            t0dsname = '%s.merge.AOD.%s%%' % (dsname, datatag)
        else:
            t0dsname = '%s.recon.AOD.%s%%' % (dsname, datatag)

        c = getJobConfig('.', dsname, taskName)
        if 'ESD' in c['inputfiles'][0]:
            filter = 'ESD'
            t0dsname = '%s.recon.ESD.%s' % (dsname, datatag)

        print ('\nRunning monitoring job for run %s:' % runnr)

        submitjob = True
        eospath = options.eospath
        # For old runs, check that the input data was replicated at Tier-0;
        # for newer runs, check instead whether the AOD merging jobs finished
        if int(runnr) < 240000:
            print ('... Querying T0 database for replication of %s' % t0dsname)
            cur = oracle.cursor()
            cur.execute("select DATASETNAME,PSTATES from DATASET where DATASETNAME like '%s' and PSTATES like '%%replicate:done%%'" % t0dsname)
            r = cur.fetchall()
            if not r:
                print ('   WARNING: input data not yet replicated - please retry later')
                submitjob = False
        else:
            print ('... Querying T0 database for completion of merging jobs of %s' % t0dsname)
            cur = oracle.cursor()
            origt0TaskName = '%s.recon.AOD.%s%%.aodmerge.task' % (dsname, datatag)
            cur.execute("select status from tasks where taskname like '%s' and tasktype='aodmerge'" % origt0TaskName)
            r = cur.fetchall()
            if not r:
                print ('   WARNING: can\'t get status of merge job for %s, running on un-merged samples instead' % origt0TaskName)
                eospath = '/eos/atlas/atlastier0/tzero/prod'
            elif not (r[0][0] == 'FINISHED' or r[0][0] == 'TRUNCATED'):
                print ('   Merge job for taskname %s is not finished yet, has status %s, running on un-merged samples instead.' % (origt0TaskName, r[0][0]))
                eospath = '/eos/atlas/atlastier0/tzero/prod'
            else:
                print ('   Merge job is finished, launching jobs.')

        if submitjob:
            if int(runnr) < 240000:
                cmd = 'beamspotman -p %s -s %s -f \'.*\\.%s\\..*\' -t %s --montaskname %s runMon %i %s' % (ptag, stream, filter, bstag, monTaskName, int(runnr), datatag)
            else:
                print ('... Submitting monitoring task')
                queue = options.batch_queue or '\'"tomorrow"\''
                paramValues = ''
                if options.params:
                    paramValues = '--params \''+options.params+'\''
                testFlag = '--test' if options.testonly else ''
                cmd = 'beamspotman --eospath=%s -p %s -s %s -f \'.*\\.%s\\..*\' -t %s --queue %s %s %s --montaskname %s runMon %i %s' % (eospath, ptag, stream, filter, bstag, queue, paramValues, testFlag, monTaskName, int(runnr), datatag)
            print (cmd)
            sys.stdout.flush()
            status = os.system(cmd) >> 8
            if status:
                print ('\nERROR: Job submission returned error - exit code %i\n' % status)

    print ()
    sys.exit(0)
if cmd == 'archive' and len(args) == 3:
    if not options.batch:
        print ('\nWARNING: If you confirm below, each of the following datasets will:')
        print ('         - be archived to EOS')
        if options.resultsondisk:
            print ('         - will be marked as RESULTSONDISK in the task database')
            print ('         - all except the results files *** WILL BE DELETED ***')
        else:
            print ('         - will be marked as ARCHIVED in the task database')
            print ('         - all its files *** WILL BE DELETED ***')
        print ()

    with getTaskManager() as taskman:
        try:
            taskList = getFullTaskNames(taskman, args[1], args[2], confirmWithUser=not options.batch, addWildCards=not options.nowildcards)
        except TaskManagerCheckError as e:
            print (e)
            sys.exit(1)

        tmpdir = '/tmp'
        path = archivepath
        onDiskCode = TaskManager.OnDiskCodes.get('ALLONDISK', None)
        archivedCode = TaskManager.OnDiskCodes.get('RESULTSONDISK', None) if options.resultsondisk else TaskManager.OnDiskCodes.get('ARCHIVED', None)
        exceptList = ['*dqflags.txt', '*.gif', '*.pdf', '*.config.py*', '*.argdict.gpickle', '*.AveBeamSpot.log',
                      '*.PlotBeamSpotCompareReproc.log', '*.sh', '*.BeamSpotNt*', '*.BeamSpotGlobalNt.log',
                      '*.status.*', '*.exit.*']

        for (dsname, taskname) in taskList:
            t = taskman.getTaskDict(dsname, taskname)

            # Check that the task files are actually on disk
            if t['ONDISK'] != onDiskCode:
                print ('Skipping task %s / %s status %s (task files must be on disk)' % (dsname, taskname, getKey(TaskManager.OnDiskCodes, t['ONDISK'])))
                print ()
                continue

            dir = '%s/%s' % (dsname, taskname)
            outname = dir.replace('/', '-')+time.strftime('-%G_%m_%d.tar.gz')
            print ('Archiving task %s / %s ...' % (dsname, taskname))
            print ('   --> %s/%s ...' % (path, outname))

            # Paranoia check against catastrophic 'rm -rf'
            if dir == '.' or dir == '*':
                print ('\n**** FATAL ERROR: Very dangerous value of task directory found: %s - ABORTING' % dir)
                sys.exit(1)

            # If the task directory exists, tar it up and copy to EOS
            if os.path.exists(dir):
                status = os.system('tar czf %s/%s %s' % (tmpdir, outname, dir)) >> 8
                if status:
                    sys.exit('\n**** ERROR: Unable to create local tar file %s/%s' % (tmpdir, outname))
                status = os.system('xrdcp %s/%s root://eosatlas.cern.ch/%s/%s' % (tmpdir, outname, path, outname)) >> 8
                if status:
                    os.system('rm -rf %s/%s' % (path, outname))
                    status = os.system('xrdcp -f %s/%s root://eosatlas.cern.ch/%s/%s' % (tmpdir, outname, path, outname)) >> 8
                    if status:
                        sys.exit('\n**** ERROR: Unable to copy file to EOS to %s/%s' % (path, outname))

                os.system('rm %s/%s' % (tmpdir, outname))
                n = taskman.setValue(dsname, taskname, 'ONDISK', archivedCode)
                if n != 1:
                    sys.exit('\n**** ERROR: Unexpected number of tasks modified: %i instead of 1 (DSNAME=%s,TASKNAME=%s)' % (n, dsname, taskname))

                if options.resultsondisk:
                    oscmd = r"find %s ! \( -name '%s' \) -type f -exec rm {} \;" % (dir, "' -or -name '".join(exceptList))
                    os.system(oscmd)
                else:
                    os.system('rm -rf %s' % dir)
            else:
                print ('\n**** ERROR: No task directory', dir, '\n')

    sys.exit(0)
if cmd == 'resubmit' and len(args) in [3, 4]:
    dsname = args[1]
    taskname = args[2]
    queue = args[3] if len(args) == 4 else options.batch_queue
    if not queue:
        print ('ERROR: No queue was specified (use -q)')
        sys.exit(1)

    basepath = os.path.join(os.getcwd(), dsname, taskname)
    dircontents = os.listdir(basepath)

    condorScriptTemplate = """executable = %(scriptfile)s
arguments = $(ClusterID) $(ProcId)
output = %(logfile)s.out
error = %(logfile)s.err
log = %(logfile)s.log
universe = vanilla
+JobFlavour = %(batchqueue)s
queue
"""

    for dir in dircontents:
        if not os.path.isdir(os.path.join(basepath, dir)):
            continue

        # Job name as configured by runJobs / runMon
        if (options.montaskname in taskname.split('.')) or options.legacy_mon:
            jobname = '-'.join([dsname, taskname, 'lb' + dir])
        else:
            jobname = dir
        fullpath = os.path.join(basepath, dir)

        # Determine job status from the marker files in the job directory
        isRunning = False
        isFailed = False
        for f in os.listdir(fullpath):
            if re.search('RUNNING', f):
                isRunning = True
            if re.search('COMPLETED', f) or re.search('POSTPROCESSING', f):
                with open(os.path.join(fullpath, jobname + '.exitstatus.dat')) as statusFile:
                    status = statusFile.read(1)
                    if status != "0":
                        isFailed = True

        if options.resubRunning and isRunning:
            print ("Will resubmit running job")
        elif (isRunning or not isFailed) and not options.resubAll:
            continue

        # Remove status and log files of the previous attempt
        for f in os.listdir(fullpath):
            if re.search('.exitstatus.', f):
                os.remove(os.path.join(fullpath, f))
            elif re.search('.exit.', f):
                os.remove(os.path.join(fullpath, f))
            elif re.search('.status.', f):
                os.remove(os.path.join(fullpath, f))
            elif re.search('.log', f):
                os.remove(os.path.join(fullpath, f))
            elif re.search('.py.final.py', f):
                os.remove(os.path.join(fullpath, f))

        jobConfig = {
            'batchqueue' : queue,
            'jobname' : jobname,
            'jobdir' : fullpath,
        }
        jobConfig['logfile'] = '%(jobdir)s/%(jobname)s.log' % jobConfig
        jobConfig['scriptfile'] = '%(jobdir)s/%(jobname)s.sh' % jobConfig

        condorScript = condorScriptTemplate % jobConfig
        print (condorScript)
        script = open('condorSubmit.sub', 'w')
        script.write(condorScript)
        script.close()
        os.chmod('condorSubmit.sub', 0o755)
        batchCmd = 'condor_submit condorSubmit.sub'
        print (batchCmd)
        os.system(batchCmd)

    with getTaskManager() as taskman:
        print (taskman.getStatus(dsname, taskname))
        taskman.setStatus(dsname, taskname, TaskManager.StatusCodes['RUNNING'])

    sys.exit(0)
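# For illustration, the condorScriptTemplate above renders to something like
# the following submit file (all paths are placeholders):
#
#   executable = /.../DSNAME/TASKNAME/001/DSNAME-TASKNAME-lb001.sh
#   arguments = $(ClusterID) $(ProcId)
#   output = /.../DSNAME-TASKNAME-lb001.log.out
#   error = /.../DSNAME-TASKNAME-lb001.log.err
#   log = /.../DSNAME-TASKNAME-lb001.log.log
#   universe = vanilla
#   +JobFlavour = "tomorrow"
#   queue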
if cmd == 'reproc' and len(args) == 5:
    from InDetBeamSpotExample import HTCondorJobRunner

    jobopts = args[1]
    dsname = args[2]
    taskname = args[3]
    inputdata = args[4]

    lbperjob = options.lbperjob
    params = {'LumiRange': lbperjob}
    cmd = ' '.join(sys.argv)
    files = []

    # Additional user-supplied job parameters
    if options.params:
        for s in options.params.split(', '):
            try:
                p = s.split('=', 1)
                params[p[0].strip()] = eval(p[1].strip())
            except:
                print ('\nERROR parsing user parameter', p, '- parameter will be ignored')

    # Build the file list and the file-to-lumi-block map
    lbMap = {}
    backend = DiskUtils.EOS() if options.eos else None
    fs = DiskUtils.FileSet.from_input(inputdata, backend=backend)
    fs = fs.matching(options.filter).use_files_from(options.filelist).only_latest(options.removedups)
    for f, lbs in fs.with_lumi_blocks(options.lbfilemap):
        lbMap[f] = lbs
        files.append(f)
    if not files:
        fail('No files were found.')

    # Form bunched jobs
    jobFileDict = {}
    jobLBDict = {}

    if lbperjob == -1:
        # Special case: a single job over all files
        jobId = 1
        jobFileDict[jobId] = files
        jobLBDict[jobId] = []
        for f in files:
            try:
                lbs = sorted(lbMap[f.split('/')[-1]])
            except KeyError:
                sys.exit('No mapping for file %s' % f.split('/')[-1])
            jobLBDict[jobId].extend(lbs)
    else:
        for f in files:
            try:
                lbs = sorted(lbMap[f.split('/')[-1]])
            except KeyError:
                sys.exit('No mapping for file %s' % f.split('/')[-1])

            for lbnr in lbs:
                jobId = int((lbnr-1)/lbperjob)
                if not jobId in jobFileDict:
                    jobFileDict[jobId] = [f]
                    jobLBDict[jobId] = [lbnr]
                else:
                    if not f in jobFileDict[jobId]:
                        jobFileDict[jobId].append(f)
                    jobLBDict[jobId].append(lbnr)
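    # Worked example of the grouping above: with lbperjob=5, LBs 1-5 fall into
    # jobId 0 and LBs 6-10 into jobId 1, since jobId = int((lbnr-1)/lbperjob);
    # the job for jobId 1 is later named <dsname>-<taskname>-lb006 after its
    # first luminosity block.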
    with getTaskManager() as taskman:
        for i in sorted(jobFileDict.keys()):
            jobnr = i*lbperjob+1   # use the first LB number of the job as job number
            files = jobFileDict[i]
            lbs = sorted(set(jobLBDict[i]))

            intlbs = []
            for lbnr in lbs:
                intlbs.append(int(lbnr))
            params['lbList'] = intlbs
            jobname = dsname+'-'+taskname+'-lb%03i' % jobnr

            queue = options.batch_queue or '"tomorrow"'
            runner = HTCondorJobRunner.HTCondorJobRunner(
                    jobnr=jobnr,
                    jobdir=os.path.join(os.getcwd(), dsname, taskname, jobname),
                    jobname=jobname,
                    inputfiles=files,
                    joboptionpath=jobopts,
                    filesperjob=len(files),
                    batchqueue=queue,
                    addinputtopoolcatalog=True,
                    taskpostprocsteps='ReprocVertexDefaultProcessing',
                    autoconfparams='DetDescrVersion',
                    returnstatuscode=True,
                    comment=cmd,
                    params=params)
            try:
                runner.configure()
            except Exception as e:
                print ("ERROR: Unable to configure JobRunner job - perhaps same job was already configured / run before?")
                print ("DEBUG: Exception =", e)

        # Register the task in the task manager
        taskman.addTask(dsname, taskname, jobopts, runner.getParam('release'), runner.getNJobs(), runner.getParam('taskpostprocsteps'), comment=cmd)

    sys.exit(0)
if cmd == 'runaod' and len(args) == 5:
    from InDetBeamSpotExample import HTCondorJobRunner

    jobopts = args[1]
    dsname = args[2]
    taskname = args[3]
    inputdata = args[4]

    lbperjob = options.lbperjob
    params = {'LumiRange': lbperjob}

    # Always fit a single pseudo-LB for scans
    if options.pseudoLbFile:
        params = {'LumiRange': 1}

    cmd = ' '.join(sys.argv)
    files = []

    # Additional user-supplied job parameters
    if options.params:
        for s in options.params.split(', '):
            try:
                p = s.split('=', 1)
                params[p[0].strip()] = eval(p[1].strip())
            except:
                print ('\nERROR parsing user parameter', p, '- parameter will be ignored')

    lbMap = {}
    backend = DiskUtils.EOS() if options.eos else None
    fs = DiskUtils.FileSet.from_input(inputdata, backend=backend)
    print ("****************************************************")
    print ("*************** printing files *********************")
    fs = fs.matching(options.filter)
    for f, lbs in fs.with_lumi_blocks(options.lbfilemap):
        print (f)
        lbMap[f] = lbs
        files.append(f)
    if not files:
        fail('No files were found.')

    # Form bunched jobs
    jobFileDict = {}
    jobLBDict = {}
    jobParams = {}

    if options.pseudoLbFile:
        # Extract start and end times of the real LBs
        from InDetBeamSpotExample.COOLUtils import COOLQuery
        coolQuery = COOLQuery()
        from InDetBeamSpotExample.Utils import getRunFromName
        lbTimes = coolQuery.getLbTimes(getRunFromName(dsname, None, True))

        # Loop over the pseudo LBs
        with open(options.pseudoLbFile) as pLbFile:
            for line in pLbFile:
                if line[0] == '#':
                    continue

                tokens = line.split()
                plbnr, tstart, tend = int(tokens[0]), int(tokens[1]), int(tokens[2])
                jobId = int(plbnr/lbperjob)

                # If we're running a VdM scan, skip pseudo LBs where the
                # acquisition flag says the beams are not stationary
                if not options.noCheckAcqFlag and len(tokens) >= 5 and abs(float(tokens[4])-1.) > 0.001:
                    print ("Point is not stationary -- skipping job %d" % jobId)
                    continue

                # Find the real LBs that overlap with the pseudo LB time window
                rlbs = [lb for (lb, time) in lbTimes.items() if (time[0] - tend/1e9)*(time[1] - tstart/1e9) < 0]

                # Find the files containing those real LBs
                filenames = []
                for f in files:
                    try:
                        lbs = sorted(lbMap[f])
                    except KeyError:
                        sys.exit('No mapping for file %s' % f.split('/')[-1])
                    if not sum([lb for lb in lbs if lb in rlbs]):
                        continue
                    filenames.append(f)

                if jobId in jobFileDict:
                    jobLBDict[jobId].extend([lb for lb in rlbs if not lb in jobLBDict[jobId]])
                    jobFileDict[jobId].extend([f for f in filenames if not f in jobFileDict[jobId]])
                    jobParams[jobId]['lbData'].append(line.strip('\n').strip())
                else:
                    jobLBDict[jobId] = rlbs
                    jobFileDict[jobId] = filenames
                    jobParams[jobId] = {'lbData': [line.strip('\n').strip()]}
    else:
        for f in files:
            try:
                lbs = sorted(lbMap[f])
            except KeyError:
                print ('WARNING: No mapping for file %s. Skipping' % f.split('/')[-1])
                continue

            for lbnr in lbs:
                jobId = int((lbnr-1)/lbperjob)
                if not jobId in jobFileDict:
                    jobFileDict[jobId] = [f]
                    jobLBDict[jobId] = [lbnr]
                else:
                    if not f in jobFileDict[jobId]:
                        jobFileDict[jobId].append(f)
                    jobLBDict[jobId].append(lbnr)

    # Submit the bunched jobs
    with getTaskManager() as taskman:
        for i in sorted(jobFileDict.keys()):
            jobnr = i*lbperjob+1   # use the first LB number of the job as job number
            files = jobFileDict[i]
            lbs = sorted(set(jobLBDict[i]))

            intlbs = []
            for lbnr in lbs:
                intlbs.append(int(lbnr))
            params['lbList'] = intlbs

            # Merge in any job-specific parameters (e.g. pseudo-LB data)
            if i in jobParams:
                p = jobParams[i]
                for k, v in p.items():
                    params[k] = v

            jobname = dsname+'-'+taskname+'-lb%03i' % jobnr

            queue = options.batch_queue or '"tomorrow"'
            runner = HTCondorJobRunner.HTCondorJobRunner(
                    jobnr=jobnr,
                    jobdir=os.path.join(os.getcwd(), dsname, taskname, jobname),
                    jobname=jobname,
                    inputfiles=files,
                    joboptionpath=jobopts,
                    filesperjob=len(files),
                    batchqueue=queue,
                    addinputtopoolcatalog=True,
                    taskpostprocsteps=' '.join(options.postprocsteps),
                    autoconfparams='DetDescrVersion',
                    returnstatuscode=True,
                    comment=cmd,
                    params=params)
            if options.testonly:
                runner.showParams()
            else:
                try:
                    runner.configure()
                except Exception as e:
                    print ("ERROR: Unable to configure JobRunner job - perhaps same job was already configured / run before?")
                    print ("DEBUG: Exception =", e)

        # Register the task in the task manager
        taskman.addTask(dsname, taskname, jobopts, runner.getParam('release'), runner.getNJobs(), runner.getParam('taskpostprocsteps'), comment=cmd)

    sys.exit(0)
if cmd == 'dqflag' and len(args) == 2:
    dbfile = args[1]
    if not options.dqtag:
        sys.exit('ERROR: No beamspot DQ tag specified')
    try:
        with open(proddqcoolpasswdfile, 'r') as passwdfile:
            passwd = passwdfile.read().strip()
    except:
        sys.exit('ERROR: Unable to determine DQ COOL upload password')

    print ('\nBeam spot DQ file: ', dbfile)
    print ('Uploading to tag:  ', options.dqtag)

    if options.ignoremode:
        ignoremode = '--ignoremode %s' % options.ignoremode
    else:
        ignoremode = ''
    if options.batch:
        batchmode = '--batch'
    else:
        batchmode = ''

    cmd = 'dq_defect_copy_defect_database.py --intag %s --outtag %s "sqlite://;schema=%s;dbname=%s" "oracle://ATLAS_COOLWRITE;schema=ATLAS_COOLOFL_GLOBAL;dbname=%s;"' % (options.srcdqtag, options.dqtag, dbfile, options.srcdqdbname, options.destdqdbname)
    print ("Running %s" % cmd)
    stat = os.system(cmd)

    if stat:
        print ("\n\nERROR: UPLOADING DQ FLAG TO COOL FAILED - PLEASE CHECK CAREFULLY!\n\n")
        sys.exit(1)
    sys.exit(0)
if cmd == 'dqflag' and len(args) == 3:
    try:
        with getTaskManager() as taskman:
            [(dsname, task)] = getFullTaskNames(taskman, args[1], args[2], requireSingleTask=True, confirmWithUser=not options.batch, addWildCards=not options.nowildcards)
    except TaskManagerCheckError as e:
        print (e)
        sys.exit(1)

    if not options.dqtag:
        sys.exit('ERROR: No beam spot DQ tag specified')

    dbfile = glob.glob('%s/%s/*-dqflags.db' % (dsname, task))
    if len(dbfile) != 1:
        print ('ERROR: Missing or ambiguous input COOL DQ file:', dbfile)
        sys.exit()

    try:
        with open(proddqcoolpasswdfile, 'r') as passwdfile:
            passwd = passwdfile.read().strip()
    except:
        sys.exit('ERROR: Unable to determine DQ COOL upload password')

    print ('\nData set:          ', dsname)
    print ('Beam spot DQ file: ', dbfile[0])
    print ('Uploading to tag:  ', options.dqtag)

    if options.ignoremode:
        ignoremode = '--ignoremode %s' % options.ignoremode
    else:
        ignoremode = ''
    if options.batch:
        batchmode = '--batch'
    else:
        batchmode = ''

    cmd = 'dq_defect_copy_defect_database.py --intag %s --outtag %s "sqlite://;schema=%s;dbname=%s" "oracle://ATLAS_COOLWRITE;schema=ATLAS_COOLOFL_GLOBAL;dbname=%s;"' % (options.srcdqtag, options.dqtag, dbfile[0], options.srcdqdbname, options.destdqdbname)
    print ("Running %s" % cmd)
    stat = os.system(cmd)

    if stat:
        print ("\n\nERROR: UPLOADING DQ FLAG TO COOL FAILED - PLEASE CHECK CAREFULLY!\n\n")
        sys.exit(1)

    # Record the upload in a flag file next to the COOL DQ file
    uploadflag = dbfile[0]+'.uploaded'
    stat = os.system('echo "`date` %s" >> %s' % (options.dqtag, uploadflag))
    if stat:
        print ("ERROR: Uploading DQ flag was successful, but unable to set upload flag", uploadflag)

    sys.exit(0)
if cmd == 'runBCID' and len(args) == 3:
    run = args[1]
    tag = args[2]
    dataset, dsname = dataset_from_run_and_tag(run, tag)

    run_jobs(options.bcidjoboptions, dsname, options.bcidtaskname,
             {
                 'cmdjobpreprocessing' : 'export STAGE_SVCCLASS=atlcal; export ATLAS_LOCAL_ROOT_BASE=/cvmfs/atlas.cern.ch/repo/ATLASLocalRootBase',
                 'SeparateByBCID' : True,
                 'VertexNtuple' : False,
             },
             '--files-per-job', 0,
             '--match', options.filter,
             '--exclude', r'.*\.TMP\.log.*',
             '--postprocsteps', 'BCIDDefaultProcessing')

    sys.exit(0)
if cmd == 'runBCIDJobs' and len(args) < 3:
    if len(args) == 2:
        earliestUpdateTime = time.time()-float(args[1])
    else:
        earliestUpdateTime = 0

    if not options.beamspottag:
        sys.exit('ERROR: No beam spot tag specified')

    print ('Running the following BCID tasks for tasks of type %s:\n' % options.runtaskname)
    print ('       %-10s  %s' % ('RUN', 'BCID TASK'))
    print ('    %s' % (60*'-'))

    taskList = []
    with getTaskManager() as taskman:
        for t in taskman.taskIterDict('*', ["where TASKNAME like '%s%%'" % options.runtaskname,
                                            "and UPDATED >= ", DbParam(earliestUpdateTime),
                                            "and COOLTAGS like '%%%s%%'" % options.beamspottag,
                                            "order by RUNNR desc"]):
            dsname = t['DSNAME']
            runnr = t['RUNNR']
            taskName = t['TASKNAME']
            datatag = taskName.split('.')[-1].split('_')[0]
            bcidTaskName = 'BCID.%s.%s' % (taskName, datatag)

            try:
                m = next(taskman.taskIterDict('*', ['where RUNNR =', DbParam(runnr),
                                                    'and DSNAME =', DbParam(dsname),
                                                    'and TASKNAME =', DbParam(bcidTaskName),
                                                    'order by UPDATED desc']))
                print ('       %-10s  %s' % (runnr, bcidTaskName))
            except StopIteration:
                print ('    *  %-10s  %s' % (runnr, '--- no BCID task found ---'))
                taskList.append(t)

    if not taskList:
        print ('\nNo jobs need to be run.\n')
        sys.exit(0)

    if not options.batch:
        a = input('\nARE YOU SURE [n] ? ')
        if a != 'y':
            sys.exit('ERROR: Running of BCID tasks aborted by user')
        print ()

    oracle = getT0DbConnection()
    for t in taskList:
        dsname = t['DSNAME']
        runnr = t['RUNNR']
        ptag = dsname.split('.')[0]
        stream = dsname.split('.')[2]
        taskName = t['TASKNAME']
        fulldatatag = taskName.split('.')[-1].split('_')[0]
        datatag = taskName.split('.')[-1].split('_')[0]
        bcidTaskName = 'BCID.%s.%s' % (taskName, datatag)

        # Choose between merged and unmerged input
        filter = 'AOD'
        if any(t[0] == 'm' for t in fulldatatag.split('_')):
            t0dsname = '%s.merge.%s.%s%%' % (dsname, filter, datatag)
        else:
            t0dsname = '%s.recon.%s.%s%%' % (dsname, filter, datatag)

        c = getJobConfig('.', dsname, taskName)
        if 'ESD' in c['inputfiles'][0]:
            filter = 'ESD'
            t0dsname = '%s.recon.ESD.%s' % (dsname, datatag)

        print ('\nRunning BCID job for run %s:' % runnr)

        print ('... Querying T0 database for replication of %s' % t0dsname)
        cur = oracle.cursor()
        cur.execute("select DATASETNAME,PSTATES from DATASET where DATASETNAME like '%s' and PSTATES like '%%replicate:done%%'" % t0dsname)
        r = cur.fetchall()
        if not r:
            print ('   WARNING: input data not yet replicated - please retry later')
            continue

        print ('... Submitting BCID task')
        cmd = 'beamspotman.py -p %s -s %s -f \'.*\\.%s\\..*\' --bcidtaskname %s runBCID %i %s' % (ptag, stream, filter, 'BCID.'+taskName, int(runnr), datatag)
        print (cmd)
        sys.stdout.flush()
        status = os.system(cmd) >> 8
        if status:
            print ('\nERROR: Job submission returned error - exit code %i\n' % status)

    print ()
    sys.exit(0)
if cmd == 'mctag' and len(args) < 12:
    from InDetBeamSpotExample.COOLUtils import *

    if not options.beamspottag:
        sys.exit('ERROR: No beam spot tag specified')

    dbfile = options.beamspottag + '.db'
    folderHandle = openBeamSpotDbFile(dbfile, dbName='OFLP200', forceNew=True)

    runMin = options.runMin if options.runMin is not None else 0
    runMax = options.runMax if options.runMax is not None else (1 << 31)-1

    writeBeamSpotEntry(folderHandle, tag=options.beamspottag,
                       runMin=runMin, runMax=runMax,
                       status=int(args[1]),
                       posX=float(args[2]), posY=float(args[3]), posZ=float(args[4]),
                       sigmaX=float(args[5]), sigmaY=float(args[6]), sigmaZ=float(args[7]),
                       tiltX=float(args[8]) if len(args) > 8 else 0.,
                       tiltY=float(args[9]) if len(args) > 9 else 0.,
                       sigmaXY=float(args[10]) if len(args) > 10 else 0.,
                       posXErr=0., posYErr=0., posZErr=0.,
                       sigmaXErr=0., sigmaYErr=0., sigmaZErr=0.,
                       tiltXErr=0., tiltYErr=0.,
                       sigmaXYErr=0.)

    print ('* MC beamspot tag written to db=OFLP200, tag=%s in %s ' % (options.beamspottag, dbfile))
    print ('  - AtlCoolConsole.py "sqlite://;schema=' + dbfile + ';dbname=OFLP200"')
    print ('* To upload to oracle use:')
    print ('  - beamspotman.py --srctag %s -t %s --srcdbname OFLP200 --destdbname OFLP200 upload %s' % (options.beamspottag, options.beamspottag, dbfile))
    sys.exit(0)
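# Example invocation (the tag name and all numbers are illustrative only):
#
#   beamspotman.py -t MyMCBeamSpot --rl 363000 --ru 364000 mctag 59 0.0 0.0 0.0 0.015 0.015 45.0
#
# This writes status 59, positions (0, 0, 0) and widths (0.015, 0.015, 45.0)
# with zero errors into MyMCBeamSpot.db for runs 363000-364000.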
if cmd == 'maketag' and len(args) < 12:
    from InDetBeamSpotExample.COOLUtils import *

    if not options.beamspottag:
        sys.exit('ERROR: No beam spot tag specified')

    dbfile = options.beamspottag + '.db'
    dbName = options.destdbname
    folderHandle = openBeamSpotDbFile(dbfile, dbName=options.destdbname, forceNew=True)

    runMin = options.runMin if options.runMin is not None else 0
    runMax = options.runMax if options.runMax is not None else (1 << 31)-1

    writeBeamSpotEntry(folderHandle, tag=options.beamspottag,
                       runMin=runMin, runMax=runMax,
                       status=int(args[1]),
                       posX=float(args[2]), posY=float(args[3]), posZ=float(args[4]),
                       sigmaX=float(args[5]), sigmaY=float(args[6]), sigmaZ=float(args[7]),
                       tiltX=float(args[8]) if len(args) > 8 else 0.,
                       tiltY=float(args[9]) if len(args) > 9 else 0.,
                       sigmaXY=float(args[10]) if len(args) > 10 else 0.,
                       posXErr=0., posYErr=0., posZErr=0.,
                       sigmaXErr=0., sigmaYErr=0., sigmaZErr=0.,
                       tiltXErr=0., tiltYErr=0.,
                       sigmaXYErr=0.)

    print ('* Beamspot tag written to db=%s, tag=%s in %s ' % (dbName, options.beamspottag, dbfile))
    print ('  - AtlCoolConsole.py "sqlite://;schema=' + dbfile + ';dbname=%s"' % (dbName))
    print ('* To upload to oracle use:')
    print ('  - beamspotman.py --srctag %s -t %s --srcdbname %s --destdbname %s upload %s' % (options.beamspottag, options.beamspottag, dbName, dbName, dbfile))
    print ('  - /afs/cern.ch/user/a/atlcond/utils22/AtlCoolMerge.py --nomail %s %s ATLAS_COOLWRITE ATLAS_COOLOFL_INDET_W <passwd>' % (dbfile, dbName))
    sys.exit(0)
print ('ERROR: Illegal command or number of arguments ({})'.format(' '.join(args)))
sys.exit(1)