beamspotman is a command line utility to do typical beam spot related tasks.
__authors__ = ['Juerg Beringer', 'Carl Suster']
__version__ = 'beamspotman.py atlas/athena'
__usage__ = '''%prog [options] command [args ...]
show RUNNR                                  For given run, show data on CAF and beam spot jobs
run RUNNR TAG                               Run standard beam spot reconstruction job
runCmd DSNAME TASKNAME CMD                  Repeat command for matching tasks (use %(DSNAME)s and
                                            %(TASKNAME)s in the command string CMD to specify the
                                            dataset and task names, if needed)
runMon RUNNR TAG                            Run standard beam spot monitoring job
runMonJobs [MAXTIME]                        Run monitoring jobs for jobs completed at most MAXTIME ago
postproc                                    Default postprocessing for any task that needs it
postproc DSNAME TASKNAME [WHATLIST]         Postprocessing for a selected set of tasks
upload DBFILE                               Upload SQLite file into COOL (independent of task)
upload DSNAME TASKNAME                      Upload result of beam spot determination into COOL
dq2get DSNAME TASKNAME                      Retrieve task data from grid job (must have set up grid env.)
queryT0 DSNAME TASKNAME                     Query Tier-0 database about a task
backup DIR                                  Backup directory DIR (may contain wildcards) to EOS
archive DSNAME TASKNAME                     Archive task data (deletes files after copying to EOS,
                                            sets on-disk status to ARCHIVED)
lsbackup                                    List contents of backup directory on EOS
reproc TEMPLATE DSNAME TASKNAME INPUTDIR    Run reprocessing task consisting of several jobs split into 5 LBs
                                            over the files in INPUTDIR
runaod TEMPLATE DSNAME TASKNAME INPUTDIR    Run over AOD, splitting jobs into sets of N LBs (similar
                                            to reproc command, except for variable params)
resubmit DSNAME TASKNAME                    Rerun jobs of a specific task (choose queue with -q QUEUE)
dqflag DBFILE                               Upload DQ SQLite file into COOL (independent of task)
dqflag DSNAME TASKNAME                      Upload result of beam spot DQ flag determination into COOL
runBCID RUNNR TAG                           Run standard beam spot BCID job
runBCIDJobs [MAXTIME]                       Run BCID jobs for jobs completed at most MAXTIME ago
mctag STATUS POSX POSY POSZ                 Create an sqlite file containing a MC tag with the
      SIGMAX SIGMAY SIGMAZ                  parameters given (and zero errors). A la beamSpot_set.py.
proddir = '/afs/cern.ch/user/a/atlidbs/jobs'
produserfile = '/afs/cern.ch/user/a/atlidbs/private/produsers.dat'
prodcoolpasswdfile = '/afs/cern.ch/user/a/atlidbs/private/coolinfo.dat'
flaskcoolpasswdfile = '/afs/cern.ch/user/a/atlidbs/private/flaskinfo.dat'
proddqcoolpasswdfile = '/afs/cern.ch/user/a/atlidbs/private/cooldqinfo.dat'
tier0dbinfofile = '/afs/cern.ch/user/a/atlidbs/private/t0dbinfo.dat'

backuppath = '/eos/atlas/atlascerngroupdisk/phys-beamspot/jobs/backup'
archivepath = '/eos/atlas/atlascerngroupdisk/phys-beamspot/jobs/archive'
import glob
import os
import re
import subprocess
import sys
import time
from copy import copy

import cx_Oracle

from InDetBeamSpotExample.TaskManager import TaskManager, TaskManagerCheckError, getFullTaskNames, getKey, appendUnique, DbParam
from InDetBeamSpotExample.PostProcessing import doPostProcessing
from InDetBeamSpotExample import BeamSpotPostProcessing
from InDetBeamSpotExample import COOLUtils
from InDetBeamSpotExample import DiskUtils

from future import standard_library
standard_library.install_aliases()

from optparse import Option, OptionParser, OptionGroup
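# Custom optparse option type 'commsep': splits the option value on commas
# and/or whitespace into a list (used by --postprocsteps below).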
def check_commsep(option, opt, value):
    return re.split(r'\s*,\s*|\s+', value)

class BeamSpotOption(Option):
    TYPES = Option.TYPES + ('commsep',)
    TYPE_CHECKER = copy(Option.TYPE_CHECKER)
    TYPE_CHECKER['commsep'] = check_commsep
parser = OptionParser(usage=__usage__, version=__version__, option_class=BeamSpotOption)
g_input = OptionGroup(parser, 'Input file options')
g_input.add_option('', '--eos', dest='eos', default=False, action='store_true', help='access files over EOS (not needed if /eos is mounted)')
g_input.add_option('-e', '--eospath', dest='eospath', default='/eos/atlas/atlastier0/rucio', help='eos path (excluding project and stream name)')
g_input.add_option('-p', '--project', dest='project', default='data17_13TeV', help='project name')
g_input.add_option('-s', '--stream', dest='stream', default='calibration_BeamSpot', help='stream name')
g_input.add_option('-f', '--filter', dest='filter', default=r'.*\.AOD\..*', help='regular expression to filter input files')
g_input.add_option('', '--lbfilemap', dest='lbfilemap', default='', help='text file with mapping between filename and lumi blocks')
g_input.add_option('', '--rucio', dest='rucio', action='store_true', default=False, help='rucio directory structure')
g_input.add_option('', '--dpdinput', dest='dpdinput', action='store_true', default=False, help='Run over DPD for runaod')
g_input.add_option('-l', '--filelist', dest='filelist', default=None, help='Explicit list of files for reproc command')
g_input.add_option('', '--removedups', dest='removedups', action='store_true', default=False, help='Remove duplicate retry files of form root.N, keeping the latest')
parser.add_option_group(g_input)
g_mode = OptionGroup(parser, 'Mode flags')
g_mode.add_option('-b', '--batch', dest='batch', action='store_true', default=False, help='batch mode - never ask for confirmation')
g_mode.add_option('', '--test', dest='testonly', action='store_true', default=False, help='for runaod, show only options and input files')
g_mode.add_option('', '--expertmode', dest='expertmode', action='store_true', default=False, help='expert mode (BE VERY CAREFUL)')
g_mode.add_option('', '--ignoremode', dest='ignoremode', default='', help='ignore update protection mode (needs expert pwd)')
parser.add_option_group(g_mode)
parser.add_option('-t', '--beamspottag', dest='beamspottag', default=beamspottag, help='beam spot tag')
parser.add_option('-d', '--dbconn', dest='dbconn', default='', help='task manager database connection string (default: check TASKDB, otherwise use sqlite_file:taskdata.db)')
parser.add_option('', '--proddir', dest='proddir', default=proddir, help='production directory (default: %s)' % proddir)
parser.add_option('', '--destdbname', dest='destdbname', default='CONDBR2', help='destination database instance name (default: CONDBR2)')
parser.add_option('', '--srcdbname', dest='srcdbname', default='BEAMSPOT', help='source database instance name (default: BEAMSPOT)')
parser.add_option('', '--srctag', dest='srctag', default='nominal', help='source tag (default: nominal)')
parser.add_option('-r', '--runjoboptions', dest='runjoboptions', default='InDetBeamSpotExample/VertexTemplate.py', help='template to run beam spot jobs')
parser.add_option('', '--runtaskname', dest='runtaskname', default='VTX', help='task name')
parser.add_option('-m', '--monjoboptions', dest='monjoboptions', default='runBeamSpotMonitor.py', help='template to run monitoring jobs')
parser.add_option('', '--bcidjoboptions', dest='bcidjoboptions', default='InDetBeamSpotExample/VertexTemplate.py', help='template to run BCID jobs')
parser.add_option('', '--montaskname', dest='montaskname', default='MON', help='task name')
parser.add_option('', '--bcidtaskname', dest='bcidtaskname', default='BCID', help='BCID task name')
parser.add_option('-g', '--griduser', dest='griduser', default='user10.JuergBeringer', help='grid user name prefix (e.g. user10.JuergBeringer)')
parser.add_option('-n', '--nowildcards', dest='nowildcards', action='store_true', default=False, help='do not add wildcards when looking up dataset and task names')
parser.add_option('-x', '--excludeiftask', dest='excludeiftask', default='', help='Exclude running cmd with runCmd if such task exists already')
parser.add_option('', '--excludeds', dest='excludeds', default='', help='Exclude running cmd with runCmd for dataset containing a certain string')
parser.add_option('', '--archive', dest='archive', action='store_true', default=False, help='archive, ie delete data after backup')
parser.add_option('', '--incremental', dest='incremental', action='store_true', default=False, help='incremental backup')
parser.add_option('', '--lbperjob', dest='lbperjob', type='int', default=5, help='number of luminosity blocks per job (default: 5)')
parser.add_option('', '--params', dest='params', default='', help='job option parameters to pass to job option template')
parser.add_option('-z', '--postprocsteps', dest='postprocsteps', type='commsep', default=['JobPostProcessing'], help='Task-level postprocessing steps, comma-separated (Default: JobPostProcessing)')
parser.add_option('', '--srcdqtag', dest='srcdqtag', default='nominal', help='source DQ tag (default: nominal)')
parser.add_option('', '--dqtag', dest='dqtag', default='HEAD', help='beam spot DQ tag')
parser.add_option('', '--destdqdbname', dest='destdqdbname', default='CONDBR2', help='DQ destination database instance name (default: CONDBR2)')
parser.add_option('', '--srcdqdbname', dest='srcdqdbname', default='IDBSDQ', help='DQ source database instance name (default: IDBSDQ)')
parser.add_option('', '--dslist', dest='dslist', default='', help='Exclude running cmd with runCmd if the dataset is not in this file')
parser.add_option('', '--nominbias', dest='nominbias', action='store_true', default=False, help='overwrite MinBias_physics in DSNAME with express_express')
parser.add_option('', '--resultsondisk', dest='resultsondisk', action='store_true', default=False, help='Leave the results on disk when archiving')
parser.add_option('', '--pLbFile', dest='pseudoLbFile', default=None, help='File for pseudo LB info from scan')
parser.add_option('', '--prefix', dest='prefix', default='', help='Prefix for reading files from mass storage (default: determined from the file name)')
parser.add_option('', '--rl', dest='runMin', type='int', default=None, help='Minimum run number for mctag (inclusive)')
parser.add_option('', '--ru', dest='runMax', type='int', default=None, help='Maximum run number for mctag (inclusive)')
parser.add_option('', '--useRun', dest='useRun', type='int', default=None, help='Run monitoring job for a given run only')
parser.add_option('', '--noCheckAcqFlag', dest='noCheckAcqFlag', action='store_true', default=False, help="Don't check acqFlag when submitting VdM jobs")
parser.add_option('', '--resubAll', dest='resubAll', action='store_true', default=False, help='Resubmit all jobs irrespective of status')
parser.add_option('', '--resubRunning', dest='resubRunning', action='store_true', default=False, help='Resubmit all "running" jobs')
parser.add_option('-q', '--queue', dest='batch_queue', default=None, help='Name of batch queue to use (default is context-specific)')
g_deprecated = OptionGroup(parser, 'Deprecated Options')
g_deprecated.add_option('', '--mon', dest='legacy_mon', action='store_true', default=False, help='mon directory structure (now inferred from montaskname)')
parser.add_option_group(g_deprecated)
(options, args) = parser.parse_args()
if len(args) < 1:
    parser.error('wrong number of command line arguments')
if not options.expertmode:
    if os.path.realpath(os.getcwd()) != os.path.realpath(options.proddir):
        sys.exit('ERROR: You must run this command in the production directory %s' % options.proddir)
    if not os.path.exists(produserfile):
        sys.exit('ERROR: Authorization file unreadable or does not exist: %s' % produserfile)
    if not subprocess.getoutput('grep `whoami` %s' % produserfile):
        sys.exit('ERROR: You are not authorized to run this command (user name must be listed in produser file %s)' % produserfile)
else:
    if os.path.realpath(os.getcwd()) != os.path.realpath(options.proddir):
        print('WARNING: You are not running in the production directory %s' % options.proddir)
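#
# Open a connection to the Tier-0 Oracle database. The connection string is
# read from a private file; a failed first attempt is retried once after 10s.
#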
try:
    with open(tier0dbinfofile, 'r') as dbinfofile:
        connstring = dbinfofile.read().strip()
except Exception:
    sys.exit('ERROR: Unable to read connection information for Tier-0 database')

dbtype, dbname = connstring.split(':', 1)
if dbtype != 'oracle':
    sys.exit('ERROR: Invalid T0 connection string')
try:
    oracle = cx_Oracle.connect(dbname)
except Exception:
    print('ERROR: First connection attempt to Tier-0 Oracle database failed; will retry in 10s ...')
    time.sleep(10)
    try:
        oracle = cx_Oracle.connect(dbname)
    except Exception as e:
        sys.exit('ERROR: Unable to connect to Tier-0 Oracle database')
if not oracle:
    sys.exit('ERROR: Unable to connect to Tier-0 Oracle database (invalid cx_Oracle connection)')
def getTaskManager():
    ''' Open task manager (used by all following commands, at the very least through subcommands). '''
    try:
        return TaskManager(options.dbconn)
    except:
        print('ERROR: Unable to access task manager database %s' % options.dbconn)
        sys.exit(1)

def fail(message):
    print('ERROR:', message)
    sys.exit(1)
def dataset_from_run_and_tag(run, tag):
    ''' Given a run number and tag, check input dataset and work out name. '''
    fs = DiskUtils.FileSet.from_ds_info(run,
            project=options.project,
            stream=options.stream,
            base=options.eospath)
    datasets = list(fs
            .use_files_from(options.filelist)
            .matching(options.filter + tag + '.*')
            .excluding(r'.*\.TMP\.log.*')
            .only_single_dataset())
    dataset = os.path.dirname(datasets[0])
    dsname = '.'.join(os.path.basename(datasets[0]).split('.')[:3])
    return (dataset, dsname)
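# Illustrative example (assumed dataset naming, project.runNumber.stream):
# for run 358031 this gives a dsname like 'data17_13TeV.00358031.calibration_BeamSpot'.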
def run_jobs(script, ds_name, task_name, params, *args):
    ''' Invoke runJobs.py '''
    arg_list = ['runJobs']
    arg_list.extend(map(str, args))
    if params:
        param_args = []
        for k, v in params.items():
            param_args.append('{}={}'.format(k, repr(v)))
        arg_list.extend(['--params', ', '.join(param_args)])
    if options.testonly:
        arg_list.append('--test')
    arg_list.extend([script, ds_name, task_name])

    print(subprocess.list2cmdline(arg_list))
    subprocess.check_call(arg_list)

cmd = args[0]
cmdargs = args[1:]
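# Illustrative composed command (values assumed, not taken from a real run):
#   runJobs --params 'LumiRange=5' InDetBeamSpotExample/VertexTemplate.py DSNAME TASKNAME

#
# Upload an arbitrary SQLite beam spot file into COOL (independent of any task)
#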
if cmd == 'upload' and len(cmdargs) == 1:
    dbfile = cmdargs[0]
    if not options.beamspottag:
        fail('No beam spot tag specified')
    try:
        with open(prodcoolpasswdfile, 'r') as passwdfile:
            passwd = passwdfile.read().strip()
    except:
        fail('Unable to determine COOL upload password')
    try:
        with open(flaskcoolpasswdfile, 'r') as flaskpasswdfile:
            flaskpasswd = flaskpasswdfile.read().strip()
    except:
        fail('Unable to determine FLASK upload password')

    print('Beam spot file:   ', dbfile)
    print('Uploading to tag: ', options.beamspottag)
    os.system('dumpBeamSpot.py -d %s -t %s %s' % (
        options.srcdbname, options.srctag, dbfile))

    stat = os.system('/afs/cern.ch/user/a/atlcond/utilsproxy/AtlCoolMerge.py --flask --nobackup --client_id cool-flask-beamspot-client --client_secret %s --nomail %s %s --folder /Indet/Beampos --tag %s --retag %s --destdb %s %s %s ATONR_COOLOFL_GPN ATLAS_COOLOFL_INDET_W %s' % (
        flaskpasswd,
        '--batch' if options.batch else '',
        ('--ignoremode %s' % options.ignoremode) if options.ignoremode else '',
        options.srctag, options.beamspottag, options.destdbname, dbfile, options.srcdbname, passwd))

    if stat:
        fail('UPLOADING TO COOL FAILED - PLEASE CHECK CAREFULLY!')
    sys.exit(0)
if cmd == 'run' and len(args) == 3:
    run = args[1]
    tag = args[2]
    dataset, dsname = dataset_from_run_and_tag(run, tag)

    run_jobs(options.runjoboptions, dsname,
             '{}-LR5.{}'.format(options.runtaskname, tag),
             {'LumiRange': 5},
             '--files-per-job', 0,
             '--match', options.filter,
             '--exclude', r'.*\.TMP\.log.*',
             '--directory', dataset)
    sys.exit(0)
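#
# Run a standard beam spot monitoring job for a given run and tag
#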
if cmd == 'runMon' and len(args) == 3:
    run = args[1]
    tag = args[2]
    if not options.beamspottag:
        fail('No beam spot tag specified')
    dataset, dsname = dataset_from_run_and_tag(run, tag)

    if 'ESD' in options.filter:
        run_jobs(options.monjoboptions, dsname,
                 '{}.{}'.format(options.montaskname, tag),
                 {
                     'cmdjobpreprocessing': 'export STAGE_SVCCLASS=atlcal; export ATLAS_LOCAL_ROOT_BASE=/cvmfs/atlas.cern.ch/repo/ATLASLocalRootBase',
                     'useBeamSpot': True,
                     'beamspottag': options.beamspottag,
                 },
                 '--match', options.filter,
                 '--exclude', r'.*\.TMP\.log.*',
                 '--directory', dataset)
    elif options.filelist is not None:
        lbinfoinfiles = True
        for line in open(options.filelist, 'r'):
            if 'lb' not in line:
                lbinfoinfiles = False
                break
        lboptions = '--lbperjob=10' if lbinfoinfiles else '--files-per-job=10'
        run_jobs(options.monjoboptions, dsname, '{}.{}'.format(options.montaskname, tag),
                 {
                     'tracksAlreadyOnBeamLine': True,
                     'useBeamSpot': True,
                     'beamspottag': options.beamspottag,
                 },
                 lboptions,
                 '--match', options.filter,
                 '--exclude', r'.*\.TMP\.log.*',
                 '--directory', dataset,
                 '--queue', '"tomorrow"')
    else:
        queue = options.batch_queue or '"tomorrow"'
        print('Queue: ', queue)
        params = {
            'cmdjobpreprocessing': 'export STAGE_SVCCLASS=atlcal; export ATLAS_LOCAL_ROOT_BASE=/cvmfs/atlas.cern.ch/repo/ATLASLocalRootBase',
            'useBeamSpot': True,
            'beamspottag': options.beamspottag,
        }
        if options.params:
            for s in options.params.split(', '):
                try:
                    p = s.split('=')
                    params[p[0].strip()] = eval(p[1].strip())
                except:
                    print('\nERROR parsing user parameter', p, '- parameter will be ignored')
        run_jobs(options.monjoboptions, dsname, '{}.{}'.format(options.montaskname, tag), params,
                 '--match', options.filter,
                 '--exclude', r'.*\.TMP\.log.*',
                 '--directory', dataset,
                 '--queue', queue)
    sys.exit(0)
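#
# Backup directories to EOS (with --archive, source directories are meant to be deleted)
#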
if cmd == 'backup' and len(args) == 2:
    if options.archive and options.incremental:
        sys.exit('ERROR: Cannot do incremental archiving')
    tmplist = glob.glob(args[1] + '/')
    dirlist = []
    for dslash in tmplist:
        d = dslash[:-1]
        if options.incremental:
            baklog = d + '/backup.log'
            if os.path.exists(baklog):
                cmd = 'find %s -maxdepth 1 -newer %s' % (d, baklog)
                (status, output) = subprocess.getstatusoutput(cmd)
                if status != 0:
                    sys.exit('ERROR: Error executing %s' % cmd)
                if output:
                    dirlist.append(d)
            else:
                dirlist.append(d)
        else:
            dirlist.append(d)

    print('\nFound %i directories for backup:\n' % len(dirlist))
    for d in dirlist:
        print('  %s' % d)

    if options.archive:
        print('\n****************************************************************')
        print('WARNING: ARCHIVING - DIRECTORIES WILL BE DELETED AFTER BACKUP!!!')
        print('****************************************************************')
    if not options.batch:
        a = input('\nARE YOU SURE [n] ? ')
        if a != 'y':
            sys.exit('ERROR: Aborted by user')

    tmpdir = '/tmp'
    path = archivepath if options.archive else backuppath
    for d in dirlist:
        if options.archive:
            outname = d.replace('/', '-') + time.strftime('-%G_%m_%d.tar.gz')
        else:
            outname = d.replace('/', '-') + '.tar.gz'
        print('\nBacking up %s --> %s/%s ...\n' % (d, path, outname))

        status = os.system('tar czf %s/%s %s' % (tmpdir, outname, d)) >> 8
        if status:
            sys.exit('\nERROR: Unable to create local tar file %s/%s' % (tmpdir, outname))

        status = os.system('xrdcp -f %s/%s root://eosatlas.cern.ch/%s/%s' % (tmpdir, outname, path, outname)) >> 8
        if status:
            os.system('rm -rf %s/%s' % (path, outname))
            status = os.system('xrdcp -f %s/%s root://eosatlas.cern.ch/%s/%s' % (tmpdir, outname, path, outname)) >> 8
        if status:
            print('\nERROR: Unable to copy file to EOS to %s/%s' % (path, outname))
            sys.exit(1)

        os.system('rm %s/%s' % (tmpdir, outname))
        status = os.system('echo "`date` %s/%s" >> %s/backup.log' % (path, outname, d)) >> 8
        if status:
            sys.exit('\nERROR: Could not update backup log file')

        if options.archive:
            print('\nWARNING: DELETION OF SOURCE FILES NOT YET IMPLEMENTED\n')
    sys.exit(0)
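#
# List the contents of the backup (or, with --archive, the archive) directory on EOS
#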
if cmd == 'lsbackup' and not cmdargs:
    path = archivepath if options.archive else backuppath
    print('Backup directory:', path)
    for f in DiskUtils.from_directory(path):
        print(f)
    sys.exit(0)
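#
# Show available data sets and beam spot tasks for a given run
#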
if cmd == 'show' and len(cmdargs) == 1:
    run = cmdargs[0]

    print('Base path:   ', options.eospath)
    print('Project tag: ', options.project)
    print('Stream:      ', options.stream)
    print()
    print('Files available (filtered by: {}):'.format(options.filter))
    print('---------------')
    fs = DiskUtils.FileSet.from_ds_info(run,
            project=options.project,
            stream=options.stream,
            base=options.eospath)
    for f in fs.matching(options.filter):
        print(f)

    print()
    print('Beam spot tasks:')
    print('---------------')
    with getTaskManager() as taskman:
        for t in taskman.taskIterDict(qual=["where DSNAME like '%%%s%%' order by UPDATED" % run]):
            print('%-45s %-45s %4s job(s), last update %s' % (t['DSNAME'], t['TASKNAME'], t['NJOBS'], time.ctime(t['UPDATED'])))
    sys.exit(0)
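#
# Default postprocessing for any task that needs it
#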
if cmd == 'postproc' and not cmdargs:
    with getTaskManager() as taskman:
        for t in taskman.taskIterDict(
                qual=['where STATUS > %i and STATUS <= %i order by UPDATED' % (
                    TaskManager.StatusCodes['RUNNING'],
                    TaskManager.StatusCodes['POSTPROCESSING'])]):
            doPostProcessing(taskman, t, t['TASKPOSTPROCSTEPS'].split(), BeamSpotPostProcessing)
    sys.exit(0)
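#
# Postprocessing for a selected set of tasks (optionally with an explicit step list)
#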
if cmd == 'postproc' and len(cmdargs) in [2, 3]:
    dsname = cmdargs[0]
    taskname = cmdargs[1]
    steps = options.postprocsteps if len(cmdargs) < 3 else cmdargs[2].split(',')
    if steps:
        print('Executing postprocessing tasks:', steps)
    else:
        print('Executing postprocessing tasks as specified in task database')

    with getTaskManager() as taskman:
        try:
            taskList = getFullTaskNames(taskman, dsname, taskname,
                    confirmWithUser=not options.batch,
                    addWildCards=not options.nowildcards)
        except TaskManagerCheckError as e:
            print(e)
            sys.exit(1)
        for taskName in taskList:
            t = taskman.getTaskDict(taskName[0], taskName[1])
            if steps:
                doPostProcessing(taskman, t, steps, BeamSpotPostProcessing)
            else:
                doPostProcessing(taskman, t, t['TASKPOSTPROCSTEPS'].split(), BeamSpotPostProcessing)
    sys.exit(0)
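#
# Upload the result of a beam spot determination into COOL
#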
if cmd == 'upload' and len(args) == 3:
    with getTaskManager() as taskman:
        try:
            [(dsname, task)] = getFullTaskNames(taskman, args[1], args[2],
                    requireSingleTask=True,
                    confirmWithUser=not options.batch,
                    addWildCards=not options.nowildcards)
        except TaskManagerCheckError as e:
            print(e)
            sys.exit(1)

    if not options.beamspottag:
        sys.exit('ERROR: No beam spot tag specified')

    dbfile = glob.glob('%s/%s/*-beamspot.db' % (dsname, task))
    if len(dbfile) != 1:
        print('ERROR: Missing or ambiguous input COOL file:', dbfile)
        sys.exit(1)

    try:
        with open(prodcoolpasswdfile, 'r') as passwdfile:
            passwd = passwdfile.read().strip()
    except:
        sys.exit('ERROR: Unable to determine COOL upload password')

    try:
        with open(flaskcoolpasswdfile, 'r') as flaskpasswdfile:
            flaskpasswd = flaskpasswdfile.read().strip()
    except:
        fail('Unable to determine FLASK upload password')

    print('\nData set:         ', dsname)
    print('Beam spot file:   ', dbfile[0])
    print('Uploading to tag: ', options.beamspottag)
    os.system('dumpBeamSpot.py -d %s -t %s %s' % (options.srcdbname, options.srctag, dbfile[0]))

    if options.ignoremode:
        ignoremode = '--passopt="--appendlocked --ignoremode %s"' % options.ignoremode
    else:
        ignoremode = ''
    if options.batch:
        batchmode = '--batch'
    else:
        batchmode = ''

    print('command: /afs/cern.ch/user/a/atlcond/utilsproxy/AtlCoolMerge.py --flask --nobackup --client_id cool-flask-beamspot-client --client_secret <flaskpassword> --nomail %s %s --folder /Indet/Beampos --tag %s --retag %s --destdb %s %s %s ATONR_COOLOFL_GPN ATLAS_COOLOFL_INDET_W <password>' % (batchmode, ignoremode, options.srctag, options.beamspottag, options.destdbname, dbfile[0], options.srcdbname))
    stat = os.system('/afs/cern.ch/user/a/atlcond/utilsproxy/AtlCoolMerge.py --flask --nobackup --client_id cool-flask-beamspot-client --client_secret %s --nomail %s %s --folder /Indet/Beampos --tag %s --retag %s --destdb %s %s %s ATONR_COOLOFL_GPN ATLAS_COOLOFL_INDET_W %s' % (flaskpasswd, batchmode, ignoremode, options.srctag, options.beamspottag, options.destdbname, dbfile[0], options.srcdbname, passwd))

    if stat:
        print("\n\nERROR: UPLOADING TO COOL FAILED - PLEASE CHECK CAREFULLY!\n\n")
        sys.exit(1)

    uploadflag = dbfile[0] + '.uploaded'
    stat = os.system('echo "`date` %s" >> %s' % (options.beamspottag, uploadflag))
    if stat:
        print("ERROR: Uploading was successful, but unable to set upload flag", uploadflag)

    cooltags = options.beamspottag
    try:
        nextbeamspot = COOLUtils.resolveNextBeamSpotFolder()
    except:
        nextbeamspot = ''
    if nextbeamspot == options.beamspottag:
        nextbeamspot = ''
    if not ('UPD' in options.beamspottag and 'UPD' in nextbeamspot):
        nextbeamspot = ''
    if nextbeamspot != '':
        print('Additionally uploading to Next tag: ', nextbeamspot)
        cooltags = appendUnique(cooltags, nextbeamspot)

    with getTaskManager() as taskman:
        t = taskman.getTaskDict(dsname, task)
        taskman.setValue(dsname, task, 'COOLTAGS', appendUnique(t['COOLTAGS'], cooltags))
    sys.exit(0)
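#
# Retrieve task data from a grid job with dq2-get (requires a grid environment)
#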
if cmd == 'dq2get' and len(args) == 3:
    try:
        with getTaskManager() as taskman:
            [(dsname, task)] = getFullTaskNames(taskman, args[1], args[2],
                    requireSingleTask=True,
                    addWildCards=not options.nowildcards)
    except TaskManagerCheckError as e:
        print(e)
        sys.exit(1)
    dir = os.path.join(dsname, task)
    griddsname = '%s.%s-%s' % (options.griduser, dsname, task)
    path = os.path.join(dir, griddsname)
    if os.path.exists(path):
        print('ERROR: Path exists already:', path)
        sys.exit(1)
    print('Extracting into %s/%s ...' % (dir, griddsname))
    stat = os.system('cd %s; dq2-get -T 6,6 %s' % (dir, griddsname))
    if stat:
        print("ERROR: Problem occurred while extracting data set - task status not changed")
        sys.exit(1)
    statfile = glob.glob('%s/000/*.status.SUBMITTED' % (dir))
    if len(statfile) != 1:
        print("ERROR: Unable to uniquely identify status - giving up. Candidate status files are:")
        print(statfile)
        sys.exit(1)
    os.system('rm %s' % statfile[0])
    basename = statfile[0][:-17]
    os.system('touch %s.exit.0' % basename)
    os.system('touch %s.COMPLETED' % basename)
    print("\nSuccessfully downloaded data set", griddsname)
    os.system('du -hs %s' % path)
    sys.exit(0)
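#
# Query the Tier-0 database about the status of a beamspotproc task
#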
if cmd == 'queryT0' and len(args) == 3:
    try:
        with getTaskManager() as taskman:
            [(dsname, task)] = getFullTaskNames(taskman, args[1], args[2],
                    requireSingleTask=True,
                    addWildCards=not options.nowildcards)
    except TaskManagerCheckError as e:
        print(e)
        sys.exit(1)
    tags = task.split('.')[-1]
    if 'ESD' in options.filter:
        t0TaskName = '%s.recon.ESD.%s.beamspotproc.task' % (dsname, tags)
    elif any(t[0] == 'm' for t in tags.split('_')):
        t0TaskName = '%s.merge.AOD.%s.beamspotproc.task' % (dsname, tags)
    else:
        t0TaskName = '%s.recon.AOD.%s.beamspotproc.task' % (dsname, tags)
    print('Querying Tier-0 database for task', t0TaskName, '...')

    cur = oracle.cursor()
    try:
        sql = str("SELECT status FROM tasks WHERE taskname='%s' AND tasktype='beamspotproc'" % t0TaskName)
        cur.execute(sql)
        r = cur.fetchall()
    except Exception as e:
        print(e)
        sys.exit('ERROR: Unable to retrieve status of task %s' % t0TaskName)
    if len(r) == 0:
        sys.exit('ERROR: No such task found: %s' % t0TaskName)
    if len(r) > 1:
        sys.exit('ERROR: %i tasks found - unable to determine task status' % len(r))
    status = r[0][0]
    print('\nTask status = %s\n' % status)
    if status == 'FINISHED' or status == 'TRUNCATED':
        sys.exit(0)
    sys.exit(1)
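#
# Repeat a command over all matching tasks (runCmd)
#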
if cmd == 'runCmd' and len(args) == 4:
    dssel = args[1]
    tasksel = args[2]
    cmd = args[3]
    if options.nowildcards:
        qual = ["where DSNAME like '%s' and TASKNAME like '%s'" % (dssel, tasksel)]
    else:
        qual = ["where DSNAME like '%%%s%%' and TASKNAME like '%%%s%%'" % (dssel, tasksel)]
    if options.beamspottag:
        qual.append("and COOLTAGS like '%%%s%%'" % options.beamspottag)
    qual.append("order by RUNNR")
    print('Running command\n')
    print('   ', cmd % ({'DSNAME': 'DSNAME', 'TASKNAME': 'TASKNAME', 'RUNNR': 'RUNNR', 'FULLDSNAME': 'FULLDSNAME'}))
    print('\nover the following datasets / tasks:\n')
    print('  %-10s  %-40s  %s' % ('Run', 'Dataset', 'Task'))
    print('  %s' % (74 * '-'))

    taskList = []
    with getTaskManager() as taskman:
        for t in taskman.taskIterDict('*', qual):
            dsname = t['DSNAME']
            if options.nominbias and dsname.find('physics_MinBias'):
                print('Warning: changing physics_MinBias in dsname to express_express')
                dsname = dsname.replace('physics_MinBias', 'express_express')
            runnr = t['RUNNR']
            taskname = t['TASKNAME']
            print('  %-10s  %-40s  %s' % (runnr, dsname, taskname))

            if options.excludeiftask:
                if options.nowildcards:
                    n = taskman.getNTasks(["where DSNAME ='%s' and TASKNAME like '%s'" % (dsname, options.excludeiftask)])
                else:
                    n = taskman.getNTasks(["where DSNAME ='%s' and TASKNAME like '%%%s%%'" % (dsname, options.excludeiftask)])
                if n != 0:
                    print('    ==> SKIPPING - %i matching tasks found' % n)
                    continue

            if options.excludeds:
                skip = False
                excludeList = options.excludeds.split(',')
                for s in excludeList:
                    if s in dsname:
                        print('    ==> SKIPPING dataset')
                        skip = True
                if skip:
                    continue

            if options.dslist:
                stat, out = subprocess.getstatusoutput('grep -c %s %s' % (dsname, options.dslist))
                if stat != 0 or out.strip() == '0':
                    print('    ==> SKIPPING - dataset %s not found in %s' % (dsname, options.dslist))
                    continue
                stat, out = subprocess.getstatusoutput('grep %s %s' % (dsname, options.dslist))
                t['FULLDSNAME'] = out.strip()
            taskList.append(t)

    if not taskList:
        print('\nNo jobs need to be run.\n')
        sys.exit(0)

    print('\n%i datasets / tasks selected.\n' % len(taskList))
    if not options.batch:
        a = input('\nARE YOU SURE [n] ? ')
        if a != 'y':
            sys.exit('ERROR: Running of monitoring tasks aborted by user')
    for t in taskList:
        fullcmd = cmd % t
        print('\nExecuting: ', fullcmd, ' ...')
        os.system(fullcmd)
    sys.exit(0)
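#
# Run monitoring jobs for recently completed beam spot tasks
#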
if cmd == 'runMonJobs' and len(args) < 3:
    if len(args) == 2:
        earliestUpdateTime = time.time() - float(args[1])
    else:
        earliestUpdateTime = 0
    if not options.beamspottag:
        sys.exit('ERROR: No beam spot tag specified')

    print('Running the following monitoring tasks for tasks of type %s:\n' % options.runtaskname)
    print('  %-10s  %s' % ('RUN', 'MONITORING TASK'))
    print('  %s' % (60 * '-'))

    taskList = []
    onDiskCode = TaskManager.OnDiskCodes['ALLONDISK']
    with getTaskManager() as taskman:
        for t in taskman.taskIterDict('*', ["where TASKNAME like '%s%%'" % options.runtaskname,
                                            "and UPDATED >= ", DbParam(earliestUpdateTime),
                                            "and ONDISK = ", DbParam(onDiskCode),
                                            "and COOLTAGS like '%%%s%%'" % options.beamspottag,
                                            "order by RUNNR desc"]):
            dsname = t['DSNAME']
            runnr = t['RUNNR']
            taskName = t['TASKNAME']
            datatag = taskName.split('.')[-1].split('_')[0]
            monTaskName = 'MON.%s.%s' % (taskName, datatag)

            if options.useRun is not None and runnr != options.useRun:
                continue

            try:
                m = next(taskman.taskIterDict('*', ["where RUNNR =", DbParam(runnr),
                                                    "and DSNAME =", DbParam(dsname),
                                                    "and TASKNAME =", DbParam(monTaskName),
                                                    "order by UPDATED desc"]))
                print('  %-10s  %s  %s' % (runnr, dsname, monTaskName))
            except StopIteration:
                print('* %-10s  %s  %s' % (runnr, dsname, '--- no monitoring task found ---'))
                taskList.append(t)

    if not taskList:
        print('\nNo jobs need to be run.\n')
        sys.exit(0)

    if not options.batch:
        a = input('\nARE YOU SURE [n] ? ')
        if a != 'y':
            sys.exit('ERROR: Running of monitoring tasks aborted by user')

    for t in taskList:
        dsname = t['DSNAME']
        runnr = t['RUNNR']
        ptag = dsname.split('.')[0]
        stream = dsname.split('.')[2]
        taskName = t['TASKNAME']
        fulldatatag = taskName.split('.')[-1]
        datatag = fulldatatag.split('_')[0]
        monTaskName = 'MON.%s' % (taskName)

        cooltags = t['COOLTAGS']
        if not cooltags:
            cooltags = ''
        bstag = cooltags.split()[0]

        filter = 'AOD'
        if any(t[0] == 'm' for t in fulldatatag.split('_')):
            t0dsname = '%s.merge.AOD.%s%%' % (dsname, datatag)
        else:
            t0dsname = '%s.recon.AOD.%s%%' % (dsname, datatag)

        # 'c' is the task's saved job configuration (loaded by a helper elided from this excerpt)
        if 'ESD' in c['inputfiles'][0]:
            filter = 'ESD'
            t0dsname = '%s.recon.ESD.%s' % (dsname, datatag)

        print('\nRunning monitoring job for run %s:' % runnr)

        submitjob = True
        eospath = options.eospath
        if int(runnr) < 240000:
            print('... Querying T0 database for replication of %s' % t0dsname)
            cur = oracle.cursor()
            cur.execute("select DATASETNAME,PSTATES from DATASET where DATASETNAME like '%s' and PSTATES like '%%replicate:done%%'" % t0dsname)
            r = cur.fetchall()
            if not r:
                print('  WARNING: input data not yet replicated - please retry later')
                submitjob = False
        else:
            print('... Querying T0 database for completion of merging jobs of %s' % t0dsname)
            cur = oracle.cursor()
            origt0TaskName = '%s.recon.AOD.%s%%.aodmerge.task' % (dsname, datatag)
            cur.execute("select status from tasks where taskname like '%s' and tasktype='aodmerge'" % origt0TaskName)
            r = cur.fetchall()
            if not r:
                print("  WARNING: can't get status of merge job for %s, running on un-merged samples instead" % origt0TaskName)
                eospath = '/eos/atlas/atlastier0/tzero/prod'
            elif not (r[0][0] == 'FINISHED' or r[0][0] == 'TRUNCATED'):
                print('  Merge job for taskname %s is not finished yet, has status %s, running on un-merged samples instead.' % (origt0TaskName, r[0][0]))
                eospath = '/eos/atlas/atlastier0/tzero/prod'
            else:
                print('  Merge job is finished, launching jobs.')

        if submitjob:
            print('... Submitting monitoring task')
            queue = options.batch_queue or '\'"tomorrow"\''
            paramValues = ''
            if options.params:
                paramValues = '--params \'' + options.params + '\''
            testFlag = '--test' if options.testonly else ''
            cmd = 'beamspotman --eospath=%s -p %s -s %s -f \'.*\\.%s\\..*\' -t %s --queue %s %s %s --montaskname %s runMon %i %s' % (eospath, ptag, stream, filter, bstag, queue, paramValues, testFlag, monTaskName, int(runnr), datatag)
            print(cmd)
            status = os.system(cmd) >> 8
            if status:
                print('\nERROR: Job submission returned error - exit code %i\n' % status)
    sys.exit(0)
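#
# Archive task data to EOS and mark it as ARCHIVED (or RESULTSONDISK)
#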
if cmd == 'archive' and len(args) == 3:
    if not options.batch:
        print('\nWARNING: If you confirm below, each of the following datasets will:')
        print('  - be archived to EOS')
        if options.resultsondisk:
            print('  - will be marked as RESULTSONDISK in the task database')
            print('  - all except the results files *** WILL BE DELETED ***')
        else:
            print('  - will be marked as ARCHIVED in the task database')
            print('  - all its files *** WILL BE DELETED ***')
        print()

    with getTaskManager() as taskman:
        try:
            taskList = getFullTaskNames(taskman, args[1], args[2],
                    confirmWithUser=not options.batch,
                    addWildCards=not options.nowildcards)
        except TaskManagerCheckError as e:
            print(e)
            sys.exit(1)

        tmpdir = '/tmp'
        path = archivepath
        onDiskCode = TaskManager.OnDiskCodes.get('ALLONDISK', None)
        archivedCode = TaskManager.OnDiskCodes.get('RESULTSONDISK', None) if options.resultsondisk else TaskManager.OnDiskCodes.get('ARCHIVED', None)
        exceptList = ['*dqflags.txt', '*.gif', '*.pdf', '*.config.py*', '*.argdict.gpickle',
                      '*.AveBeamSpot.log', '*.PlotBeamSpotCompareReproc.log', '*.sh',
                      '*.BeamSpotNt*', '*.BeamSpotGlobalNt.log', '*.status.*', '*.exit.*']

        for (dsname, taskname) in taskList:
            t = taskman.getTaskDict(dsname, taskname)

            if t['ONDISK'] != onDiskCode:
                print('Skipping task %s / %s status %s (task files must be on disk)' % (dsname, taskname, getKey(TaskManager.OnDiskCodes, t['ONDISK'])))
                continue

            dir = '%s/%s' % (dsname, taskname)
            outname = dir.replace('/', '-') + time.strftime('-%G_%m_%d.tar.gz')
            print('Archiving task %s / %s ...' % (dsname, taskname))
            print('  --> %s/%s ...' % (path, outname))

            if dir == '.' or dir == '*':
                print('\n**** FATAL ERROR: Very dangerous value of task directory found: %s - ABORTING' % dir)
                sys.exit(1)

            if os.path.exists(dir):
                status = os.system('tar czf %s/%s %s' % (tmpdir, outname, dir)) >> 8
                if status:
                    sys.exit('\n**** ERROR: Unable to create local tar file %s/%s' % (tmpdir, outname))
                status = os.system('xrdcp %s/%s root://eosatlas.cern.ch/%s/%s' % (tmpdir, outname, path, outname)) >> 8
                if status:
                    os.system('rm -rf %s/%s' % (path, outname))
                    status = os.system('xrdcp -f %s/%s root://eosatlas.cern.ch/%s/%s' % (tmpdir, outname, path, outname)) >> 8
                if status:
                    sys.exit('\n**** ERROR: Unable to copy file to EOS to %s/%s' % (path, outname))
                os.system('rm %s/%s' % (tmpdir, outname))
                n = taskman.setValue(dsname, taskname, 'ONDISK', archivedCode)
                if n != 1:
                    sys.exit('\n**** ERROR: Unexpected number of tasks modified: %i instead of 1 (DSNAME=%s,TASKNAME=%s)' % (n, dsname, taskname))
                if options.resultsondisk:
                    oscmd = r"find %s ! \( -name '%s' \) -type f -exec rm {} \;" % (dir, "' -or -name '".join(exceptList))
                    os.system(oscmd)
                else:
                    os.system('rm -rf %s' % dir)
            else:
                print('\n**** ERROR: No task directory', dir, '\n')
    sys.exit(0)
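#
# Resubmit failed (or, on request, all or still running) jobs of a task via HTCondor
#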
if cmd == 'resubmit' and len(args) in [3, 4]:
    dsname = args[1]
    taskname = args[2]
    queue = args[3] if len(args) == 4 else options.batch_queue
    if not queue:
        print('ERROR: No queue was specified (use -q)')
        sys.exit(1)

    basepath = os.path.join(os.getcwd(), dsname, taskname)
    dircontents = os.listdir(basepath)

    condorScriptTemplate = """executable = %(scriptfile)s
arguments = $(ClusterID) $(ProcId)
output = %(logfile)s.out
error = %(logfile)s.err
log = %(logfile)s.log
universe = vanilla
+JobFlavour = %(batchqueue)s
queue
"""

    for dir in dircontents:
        if not os.path.isdir(os.path.join(basepath, dir)):
            continue
        if (options.montaskname in taskname.split('.')) or options.legacy_mon:
            jobname = '-'.join([dsname, taskname, 'lb' + dir])
        else:
            jobname = dir
        fullpath = os.path.join(basepath, dir)

        isRunning = False
        isFailed = False
        for f in os.listdir(fullpath):
            if re.search('RUNNING', f):
                isRunning = True
            if re.search('COMPLETED', f) or re.search('POSTPROCESSING', f):
                with open(os.path.join(fullpath, jobname + '.exitstatus.dat')) as statusFile:
                    status = statusFile.read(1)
                    if status != '0':
                        isFailed = True

        if options.resubRunning and isRunning:
            print("Will resubmit running job")
        elif (isRunning or not isFailed) and not options.resubAll:
            continue

        for f in os.listdir(fullpath):
            if re.search('.exitstatus.', f):
                os.remove(os.path.join(fullpath, f))
            elif re.search('.exit.', f):
                os.remove(os.path.join(fullpath, f))
            elif re.search('.status.', f):
                os.remove(os.path.join(fullpath, f))
            elif re.search('.log', f):
                os.remove(os.path.join(fullpath, f))
            elif re.search('.py.final.py', f):
                os.remove(os.path.join(fullpath, f))

        jobConfig = {
            'batchqueue': queue,
            'jobname': jobname,
            'jobdir': fullpath,
        }
        jobConfig['logfile'] = '%(jobdir)s/%(jobname)s.log' % jobConfig
        jobConfig['scriptfile'] = '%(jobdir)s/%(jobname)s.sh' % jobConfig

        condorScript = condorScriptTemplate % jobConfig
        print(condorScript)
        script = open('condorSubmit.sub', 'w')
        script.write(condorScript)
        script.close()
        os.chmod('condorSubmit.sub', 0o755)
        batchCmd = 'condor_submit condorSubmit.sub'
        print(batchCmd)
        os.system(batchCmd)

    with getTaskManager() as taskman:
        print(taskman.getStatus(dsname, taskname))
        taskman.setStatus(dsname, taskname, TaskManager.StatusCodes['RUNNING'])
    sys.exit(0)
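#
# Run a reprocessing task as several jobs, splitting the input files by luminosity blocks
#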
if cmd == 'reproc' and len(args) == 5:
    from InDetBeamSpotExample import HTCondorJobRunner
    jobopts = args[1]
    dsname = args[2]
    taskname = args[3]
    inputdata = args[4]

    lbperjob = options.lbperjob
    params = {'LumiRange': lbperjob}
    cmd = ' '.join(sys.argv)

    # Additional job parameters
    if options.params:
        for s in options.params.split(', '):
            try:
                p = s.split('=')
                params[p[0].strip()] = eval(p[1].strip())
            except:
                print('\nERROR parsing user parameter', p, '- parameter will be ignored')

    # Find the input files and their lumi block info
    backend = DiskUtils.EOS() if options.eos else None
    fs = DiskUtils.FileSet.from_input(inputdata, backend=backend)
    fs = fs.matching(options.filter).use_files_from(options.filelist).only_latest(options.removedups)
    lbMap = {}
    files = []
    for f, lbs in fs.with_lumi_blocks(options.lbfilemap):
        lbMap[f.split('/')[-1]] = lbs
        files.append(f)
    if not files:
        fail('No files were found.')

    # Form bunched jobs
    jobFileDict = {}
    jobLBDict = {}
    if lbperjob == -1:
        # Single job over all files
        jobId = 1
        jobFileDict[jobId] = files
        jobLBDict[jobId] = []
        for f in files:
            try:
                lbs = sorted(lbMap[f.split('/')[-1]])
            except KeyError:
                sys.exit('No mapping for file %s' % f.split('/')[-1])
            jobLBDict[jobId].extend(lbs)
    else:
        for f in files:
            try:
                lbs = sorted(lbMap[f.split('/')[-1]])
            except KeyError:
                sys.exit('No mapping for file %s' % f.split('/')[-1])
            for lbnr in lbs:
                jobId = int((lbnr - 1) / lbperjob)
                if not jobId in jobFileDict:
                    jobFileDict[jobId] = [f]
                    jobLBDict[jobId] = [lbnr]
                else:
                    if not f in jobFileDict[jobId]:
                        jobFileDict[jobId].append(f)
                    jobLBDict[jobId].append(lbnr)

    # Submit the jobs
    for i in sorted(jobFileDict.keys()):
        jobnr = i * lbperjob + 1
        files = jobFileDict[i]
        lbs = sorted(set(jobLBDict[i]))
        intlbs = []
        for lbnr in lbs:
            intlbs.append(int(lbnr))
        params['lbList'] = intlbs
        jobname = dsname + '-' + taskname + '-lb%03i' % jobnr

        queue = options.batch_queue or '"tomorrow"'
        runner = HTCondorJobRunner.HTCondorJobRunner(
                jobnr=jobnr,
                jobdir=os.path.join(os.getcwd(), dsname, taskname, jobname),
                jobname=jobname,
                inputfiles=files,
                joboptionpath=jobopts,
                filesperjob=len(files),
                batchqueue=queue,
                addinputtopoolcatalog=True,
                taskpostprocsteps='ReprocVertexDefaultProcessing',
                autoconfparams='DetDescrVersion',
                returnstatuscode=True,
                comment=cmd,
                params=params)
        try:
            runner.configure()
            runner.run()
        except Exception as e:
            print("ERROR: Unable to configure JobRunner job - perhaps same job was already configured / run before?")
            print("DEBUG: Exception =", e)

    # Register the task in the task manager
    with getTaskManager() as taskman:
        taskman.addTask(dsname, taskname, jobopts, runner.getParam('release'), runner.getNJobs(), runner.getParam('taskpostprocsteps'), comment=cmd)
    sys.exit(0)
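#
# Run over AOD, splitting jobs into sets of luminosity blocks (like reproc,
# but with support for pseudo-LB files from VdM scans)
#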
if cmd == 'runaod' and len(args) == 5:
    from InDetBeamSpotExample import HTCondorJobRunner
    jobopts = args[1]
    dsname = args[2]
    taskname = args[3]
    inputdata = args[4]

    lbperjob = options.lbperjob
    params = {'LumiRange': lbperjob}
    # Always process each LB separately if we use a pseudo-LB file
    if options.pseudoLbFile:
        params = {'LumiRange': 1}
    cmd = ' '.join(sys.argv)

    if options.params:
        for s in options.params.split(', '):
            try:
                p = s.split('=')
                params[p[0].strip()] = eval(p[1].strip())
            except:
                print('\nERROR parsing user parameter', p, '- parameter will be ignored')

    backend = DiskUtils.EOS() if options.eos else None
    fs = DiskUtils.FileSet.from_input(inputdata, backend=backend)
    print("****************************************************")
    print("*************** printing files *********************")
    fs = fs.matching(options.filter)
    lbMap = {}
    files = []
    for f, lbs in fs.with_lumi_blocks(options.lbfilemap):
        print(f)
        lbMap[f.split('/')[-1]] = lbs
        files.append(f)
    if not files:
        fail('No files were found.')

    # Form bunched jobs
    jobFileDict = {}
    jobLBDict = {}
    jobParams = {}

    if options.pseudoLbFile:
        # Extract the start and end times of the real LBs
        from InDetBeamSpotExample.COOLUtils import COOLQuery
        coolQuery = COOLQuery()
        from InDetBeamSpotExample.Utils import getRunFromName
        lbTimes = coolQuery.getLbTimes(getRunFromName(dsname, None, True))

        # Loop over the pseudo LBs
        with open(options.pseudoLbFile) as pLbFile:
            for line in pLbFile:
                if line[0] == '#':
                    continue
                tokens = line.split()
                plbnr, tstart, tend = int(tokens[0]), int(tokens[1]), int(tokens[2])
                jobId = int(plbnr / lbperjob)

                # If the file contains an acquisition flag, only run over stationary points
                if not options.noCheckAcqFlag and len(tokens) >= 5 and abs(float(tokens[4]) - 1.) > 0.001:
                    print("Point is not stationary -- skipping job %d" % jobId)
                    continue

                # Find the real LBs covering the time period of the pseudo LB
                rlbs = [lb for (lb, time) in lbTimes.items() if (time[0] - tend/1e9)*(time[1] - tstart/1e9) < 0]

                # Find the files containing those real LBs
                filenames = []
                for f in files:
                    try:
                        lbs = sorted(lbMap[f.split('/')[-1]])
                    except KeyError:
                        sys.exit('No mapping for file %s' % f.split('/')[-1])
                    if not sum([lb for lb in lbs if lb in rlbs]):
                        continue
                    filenames.append(f)
                if jobId in jobFileDict:
                    jobLBDict[jobId].extend([lb for lb in rlbs if not lb in jobLBDict[jobId]])
                    jobFileDict[jobId].extend([f for f in filenames if not f in jobFileDict[jobId]])
                    jobParams[jobId]['lbData'].append(line.strip('\n').strip())
                else:
                    jobLBDict[jobId] = rlbs
                    jobFileDict[jobId] = filenames
                    jobParams[jobId] = {'lbData': [line.strip('\n').strip()]}
    else:
        for f in files:
            try:
                lbs = sorted(lbMap[f.split('/')[-1]])
            except KeyError:
                print('WARNING: No mapping for file %s. Skipping' % f.split('/')[-1])
                continue
            for lbnr in lbs:
                jobId = int((lbnr - 1) / lbperjob)
                if not jobId in jobFileDict:
                    jobFileDict[jobId] = [f]
                    jobLBDict[jobId] = [lbnr]
                else:
                    if not f in jobFileDict[jobId]:
                        jobFileDict[jobId].append(f)
                    jobLBDict[jobId].append(lbnr)

    # Submit the jobs
    for i in sorted(jobFileDict.keys()):
        jobnr = i * lbperjob + 1
        files = jobFileDict[i]
        lbs = sorted(set(jobLBDict[i]))
        intlbs = []
        for lbnr in lbs:
            intlbs.append(int(lbnr))
        params['lbList'] = intlbs
        if i in jobParams:
            p = jobParams[i]
            for k, v in p.items():
                params[k] = v
        jobname = dsname + '-' + taskname + '-lb%03i' % jobnr

        queue = options.batch_queue
        if queue is None:
            queue = '"tomorrow"' if options.pseudoLbFile else '"tomorrow"'
        runner = HTCondorJobRunner.HTCondorJobRunner(
                jobnr=jobnr,
                jobdir=os.path.join(os.getcwd(), dsname, taskname, jobname),
                jobname=jobname,
                inputfiles=files,
                joboptionpath=jobopts,
                filesperjob=len(files),
                batchqueue=queue,
                addinputtopoolcatalog=True,
                taskpostprocsteps=' '.join(options.postprocsteps),
                autoconfparams='DetDescrVersion',
                returnstatuscode=True,
                comment=cmd,
                params=params)
        if options.testonly:
            runner.showParams()
        else:
            try:
                runner.configure()
                runner.run()
            except Exception as e:
                print("ERROR: Unable to configure JobRunner job - perhaps same job was already configured / run before?")
                print("DEBUG: Exception =", e)

    # Register the task in the task manager
    with getTaskManager() as taskman:
        taskman.addTask(dsname, taskname, jobopts, runner.getParam('release'), runner.getNJobs(), runner.getParam('taskpostprocsteps'), comment=cmd)
    sys.exit(0)
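#
# Upload a beam spot DQ SQLite file into COOL (independent of any task)
#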
if cmd == 'dqflag' and len(args) == 2:
    dbfile = args[1]
    if not options.dqtag:
        sys.exit('ERROR: No beamspot DQ tag specified')
    try:
        with open(proddqcoolpasswdfile, 'r') as passwdfile:
            passwd = passwdfile.read().strip()
    except:
        sys.exit('ERROR: Unable to determine DQ COOL upload password')

    print('\nBeam spot DQ file: ', dbfile)
    print('Uploading to tag:  ', options.dqtag)

    if options.ignoremode:
        ignoremode = '--ignoremode %s' % options.ignoremode
    else:
        ignoremode = ''
    if options.batch:
        batchmode = '--batch'
    else:
        batchmode = ''

    cmd = 'dq_defect_copy_defect_database.py --intag %s --outtag %s "sqlite://;schema=%s;dbname=%s" "oracle://ATLAS_COOLWRITE;schema=ATLAS_COOLOFL_GLOBAL;dbname=%s;"' % (options.srcdqtag, options.dqtag, dbfile, options.srcdqdbname, options.destdqdbname)
    print("Running %s" % cmd)
    stat = os.system(cmd)
    if stat:
        print("\n\nERROR: UPLOADING DQ FLAG TO COOL FAILED - PLEASE CHECK CAREFULLY!\n\n")
        sys.exit(1)
    sys.exit(0)
if cmd == 'dqflag' and len(args) == 3:
    try:
        with getTaskManager() as taskman:
            [(dsname, task)] = getFullTaskNames(taskman, args[1], args[2],
                    requireSingleTask=True,
                    confirmWithUser=not options.batch,
                    addWildCards=not options.nowildcards)
    except TaskManagerCheckError as e:
        print(e)
        sys.exit(1)

    if not options.dqtag:
        sys.exit('ERROR: No beam spot DQ tag specified')

    dbfile = glob.glob('%s/%s/*-dqflags.db' % (dsname, task))
    if len(dbfile) != 1:
        print('ERROR: Missing or ambiguous input COOL DQ file:', dbfile)
        sys.exit(1)

    try:
        with open(proddqcoolpasswdfile, 'r') as passwdfile:
            passwd = passwdfile.read().strip()
    except:
        sys.exit('ERROR: Unable to determine DQ COOL upload password')

    print('\nData set:          ', dsname)
    print('Beam spot DQ file: ', dbfile[0])
    print('Uploading to tag:  ', options.dqtag)

    if options.ignoremode:
        ignoremode = '--ignoremode %s' % options.ignoremode
    else:
        ignoremode = ''
    if options.batch:
        batchmode = '--batch'
    else:
        batchmode = ''

    cmd = 'dq_defect_copy_defect_database.py --intag %s --outtag %s "sqlite://;schema=%s;dbname=%s" "oracle://ATLAS_COOLWRITE;schema=ATLAS_COOLOFL_GLOBAL;dbname=%s;"' % (options.srcdqtag, options.dqtag, dbfile[0], options.srcdqdbname, options.destdqdbname)
    print("Running %s" % cmd)
    stat = os.system(cmd)
    if stat:
        print("\n\nERROR: UPLOADING DQ FLAG TO COOL FAILED - PLEASE CHECK CAREFULLY!\n\n")
        sys.exit(1)

    uploadflag = dbfile[0] + '.uploaded'
    stat = os.system('echo "`date` %s" >> %s' % (options.dqtag, uploadflag))
    if stat:
        print("ERROR: Uploading DQ flag was successful, but unable to set upload flag", uploadflag)
    sys.exit(0)
if cmd == 'runBCID' and len(args) == 3:
    run = args[1]
    tag = args[2]
    dataset, dsname = dataset_from_run_and_tag(run, tag)

    run_jobs(options.bcidjoboptions, dsname, options.bcidtaskname,
             {
                 'cmdjobpreprocessing': 'export STAGE_SVCCLASS=atlcal; export ATLAS_LOCAL_ROOT_BASE=/cvmfs/atlas.cern.ch/repo/ATLASLocalRootBase',
                 'SeparateByBCID': True,
                 'VertexNtuple': False,
             },
             '--files-per-job', 0,
             '--match', options.filter,
             '--exclude', r'.*\.TMP\.log.*',
             '--directory', dataset,
             '--postprocsteps', 'BCIDDefaultProcessing')
    sys.exit(0)
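#
# Run BCID jobs for recently completed beam spot tasks
#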
if cmd == 'runBCIDJobs' and len(args) < 3:
    if len(args) == 2:
        earliestUpdateTime = time.time() - float(args[1])
    else:
        earliestUpdateTime = 0

    if not options.beamspottag:
        sys.exit('ERROR: No beam spot tag specified')

    print('Running the following BCID tasks for tasks of type %s:\n' % options.runtaskname)
    print('  %-10s  %s' % ('RUN', 'BCID TASK'))
    print('  %s' % (60 * '-'))

    taskList = []
    with getTaskManager() as taskman:
        for t in taskman.taskIterDict('*', ["where TASKNAME like '%s%%'" % options.runtaskname,
                                            "and UPDATED >= ", DbParam(earliestUpdateTime),
                                            "and COOLTAGS like '%%%s%%'" % options.beamspottag,
                                            "order by RUNNR desc"]):
            dsname = t['DSNAME']
            runnr = t['RUNNR']
            taskName = t['TASKNAME']
            datatag = taskName.split('.')[-1].split('_')[0]
            bcidTaskName = 'BCID.%s.%s' % (taskName, datatag)

            try:
                m = next(taskman.taskIterDict('*', ['where RUNNR =', DbParam(runnr),
                                                    'and DSNAME =', DbParam(dsname),
                                                    'and TASKNAME =', DbParam(bcidTaskName),
                                                    'order by UPDATED desc']))
                print('  %-10s  %s' % (runnr, bcidTaskName))
            except StopIteration:
                print('* %-10s  %s' % (runnr, '--- no BCID task found ---'))
                taskList.append(t)

    if not taskList:
        print('\nNo jobs need to be run.\n')
        sys.exit(0)

    if not options.batch:
        a = input('\nARE YOU SURE [n] ? ')
        if a != 'y':
            sys.exit('ERROR: Running of BCID tasks aborted by user')

    for t in taskList:
        dsname = t['DSNAME']
        runnr = t['RUNNR']
        ptag = dsname.split('.')[0]
        stream = dsname.split('.')[2]
        taskName = t['TASKNAME']
        fulldatatag = taskName.split('.')[-1].split('_')[0]
        datatag = taskName.split('.')[-1].split('_')[0]
        bcidTaskName = 'BCID.%s.%s' % (taskName, datatag)

        filter = 'AOD'
        if any(t[0] == 'm' for t in fulldatatag.split('_')):
            t0dsname = '%s.merge.%s.%s%%' % (dsname, filter, datatag)
        else:
            t0dsname = '%s.recon.%s.%s%%' % (dsname, filter, datatag)

        # 'c' is the task's saved job configuration (loaded by a helper elided from this excerpt)
        if 'ESD' in c['inputfiles'][0]:
            filter = 'ESD'
            t0dsname = '%s.recon.ESD.%s' % (dsname, datatag)

        print('\nRunning BCID job for run %s:' % runnr)

        print('... Querying T0 database for replication of %s' % t0dsname)
        cur = oracle.cursor()
        cur.execute("select DATASETNAME,PSTATES from DATASET where DATASETNAME like '%s' and PSTATES like '%%replicate:done%%'" % t0dsname)
        r = cur.fetchall()
        if not r:
            print('  WARNING: input data not yet replicated - please retry later')
            continue

        print('... Submitting BCID task')
        cmd = 'beamspotman.py -p %s -s %s -f \'.*\\.%s\\..*\' --bcidtaskname %s runBCID %i %s' % (ptag, stream, filter, 'BCID.' + taskName, int(runnr), datatag)
        print(cmd)
        status = os.system(cmd) >> 8
        if status:
            print('\nERROR: Job submission returned error - exit code %i\n' % status)
    sys.exit(0)
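#
# Create an SQLite file containing an MC beam spot tag (a la beamSpot_set.py)
#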
if cmd == 'mctag' and len(args) < 12:
    from InDetBeamSpotExample.COOLUtils import openBeamSpotDbFile, writeBeamSpotEntry

    if not options.beamspottag:
        sys.exit('ERROR: No beam spot tag specified')

    dbfile = options.beamspottag + '.db'
    folderHandle = openBeamSpotDbFile(dbfile, dbName='OFLP200', forceNew=True)

    runMin = options.runMin if options.runMin is not None else 0
    runMax = options.runMax if options.runMax is not None else (1 << 31) - 1

    writeBeamSpotEntry(folderHandle, tag=options.beamspottag,
                       runMin=runMin, runMax=runMax,
                       status=int(args[1]),
                       posX=float(args[2]), posY=float(args[3]), posZ=float(args[4]),
                       sigmaX=float(args[5]), sigmaY=float(args[6]), sigmaZ=float(args[7]),
                       tiltX=float(args[8]) if len(args) > 8 else 0.,
                       tiltY=float(args[9]) if len(args) > 9 else 0.,
                       sigmaXY=float(args[10]) if len(args) > 10 else 0.,
                       posXErr=0., posYErr=0., posZErr=0.,
                       sigmaXErr=0., sigmaYErr=0., sigmaZErr=0.,
                       tiltXErr=0., tiltYErr=0.,
                       sigmaXYErr=0.)

    print('* MC beamspot tag written to db=OFLP200, tag=%s in %s ' % (options.beamspottag, dbfile))
    print('  - AtlCoolConsole.py "sqlite://;schema=' + dbfile + ';dbname=OFLP200"')
    print('* To upload to oracle use:')
    print('  - beamspotman.py --srctag %s -t %s --srcdbname OFLP200 --destdbname OFLP200 upload %s' % (options.beamspottag, options.beamspottag, dbfile))
    sys.exit(0)
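#
# Create an SQLite file containing a beam spot tag for data
#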
if cmd == 'maketag' and len(args) < 12:
    from InDetBeamSpotExample.COOLUtils import openBeamSpotDbFile, writeBeamSpotEntry

    if not options.beamspottag:
        sys.exit('ERROR: No beam spot tag specified')

    dbfile = options.beamspottag + '.db'
    dbName = options.destdbname
    folderHandle = openBeamSpotDbFile(dbfile, dbName=options.destdbname, forceNew=True)

    runMin = options.runMin if options.runMin is not None else 0
    runMax = options.runMax if options.runMax is not None else (1 << 31) - 1

    writeBeamSpotEntry(folderHandle, tag=options.beamspottag,
                       runMin=runMin, runMax=runMax,
                       status=int(args[1]),
                       posX=float(args[2]), posY=float(args[3]), posZ=float(args[4]),
                       sigmaX=float(args[5]), sigmaY=float(args[6]), sigmaZ=float(args[7]),
                       tiltX=float(args[8]) if len(args) > 8 else 0.,
                       tiltY=float(args[9]) if len(args) > 9 else 0.,
                       sigmaXY=float(args[10]) if len(args) > 10 else 0.,
                       posXErr=0., posYErr=0., posZErr=0.,
                       sigmaXErr=0., sigmaYErr=0., sigmaZErr=0.,
                       tiltXErr=0., tiltYErr=0.,
                       sigmaXYErr=0.)

    print('* Beamspot tag written to db=%s, tag=%s in %s ' % (dbName, options.beamspottag, dbfile))
    print('  - AtlCoolConsole.py "sqlite://;schema=' + dbfile + ';dbname=%s"' % (dbName))
    print('* To upload to oracle use:')
    print('  - beamspotman.py --srctag %s -t %s --srcdbname %s --destdbname %s upload %s' % (options.beamspottag, options.beamspottag, dbName, dbName, dbfile))
    print('  - /afs/cern.ch/user/a/atlcond/utils22/AtlCoolMerge.py --nomail %s %s ATLAS_COOLWRITE ATLAS_COOLOFL_INDET_W <passwd>' % (dbfile, dbName))
    sys.exit(0)

print('ERROR: Illegal command or number of arguments ({})'.format(' '.join(args)))
sys.exit(1)