from __future__ import print_function
"""
beamspotman is a command line utility to do typical beam spot related tasks.
"""
__authors__ = ['Juerg Beringer', 'Carl Suster']
__version__ = 'beamspotman.py atlas/athena'
__usage__ = '''%prog [options] command [args ...]

Commands are:

show RUNNR                                For given run, show data on CAF and beam spot jobs
run RUNNR TAG                             Run standard beam spot reconstruction job
runCmd DSNAME TASKNAME CMD                Repeat command for matching tasks (use %(DSNAME)s and
                                          %(TASKNAME)s in the command string CMD to specify the
                                          dataset and task names, if needed)
runMon RUNNR TAG                          Run standard beam spot monitoring job
runMonJobs [MAXTIME]                      Run monitoring jobs for jobs completed at most MAXTIME ago
postproc                                  Default postprocessing for any task that needs it
postproc DSNAME TASKNAME [WHATLIST]       Postprocessing for a selected set of tasks
upload DBFILE                             Upload SQLite file into COOL (independent of task)
upload DSNAME TASKNAME                    Upload result of beam spot determination into COOL
dq2get DSNAME TASKNAME                    Retrieve task data from grid job (must have set up grid env.)
queryT0 DSNAME TASKNAME                   Query Tier-0 database about a task
backup DIR                                Backup directory DIR (may contain wildcards) to EOS
archive DSNAME TASKNAME                   Archive task data (deletes files after copying to EOS,
                                          sets on-disk status to ARCHIVED)
lsbackup                                  List contents of backup directory on EOS
reproc TEMPLATE DSNAME TASKNAME INPUTDIR  Run reprocessing task consisting of several jobs split into 5 LBs
                                          over the files in INPUTDIR
runaod TEMPLATE DSNAME TASKNAME INPUTDIR  Run over AOD, splitting jobs into sets of N LBs (similar
                                          to reproc command, except for variable params)
resubmit DSNAME TASKNAME                  Rerun jobs of a specific task (choose queue with -q QUEUE)
dqflag DBFILE                             Upload DQ SQLite file into COOL (independent of task)
dqflag DSNAME TASKNAME                    Upload result of beam spot DQ flag determination into COOL
runBCID RUNNR TAG                         Run standard beam spot BCID job
runBCIDJobs [MAXTIME]                     Run BCID jobs for jobs completed at most MAXTIME ago
mctag STATUS POSX POSY POSZ               Create an sqlite file containing a MC tag with the
      SIGMAX SIGMAY SIGMAZ                parameters given (and zero errors). A la beamSpot_set.py.
'''
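# A few illustrative invocations (the run number, tag and file name below are
# made-up examples, not defaults):
#   beamspotman.py show 358031
#   beamspotman.py run 358031 f956
#   beamspotman.py -t IndetBeampos-ES1-UPD2 upload 358031-beamspot.db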
proddir = '/afs/cern.ch/user/a/atlidbs/jobs'
produserfile = '/afs/cern.ch/user/a/atlidbs/private/produsers.dat'
prodcoolpasswdfile = '/afs/cern.ch/user/a/atlidbs/private/coolinfo.dat'
flaskcoolpasswdfile = '/afs/cern.ch/user/a/atlidbs/private/flaskinfo.dat'
proddqcoolpasswdfile = '/afs/cern.ch/user/a/atlidbs/private/cooldqinfo.dat'
tier0dbinfofile = '/afs/cern.ch/user/a/atlidbs/private/t0dbinfo.dat'
backuppath = '/eos/atlas/atlascerngroupdisk/phys-beamspot/jobs/backup'
archivepath = '/eos/atlas/atlascerngroupdisk/phys-beamspot/jobs/archive'
import sys, os
import re
import time
import glob
import subprocess
from copy import copy

from InDetBeamSpotExample.TaskManager import *
from InDetBeamSpotExample.PostProcessing import doPostProcessing
from InDetBeamSpotExample import BeamSpotPostProcessing
from InDetBeamSpotExample import COOLUtils
from InDetBeamSpotExample import DiskUtils

from future import standard_library
standard_library.install_aliases()
from optparse import Option, OptionParser, OptionGroup

def check_commsep(option, opt, value):
    return re.split(r'\s*,\s*|\s+', value)

class BeamSpotOption(Option):
    TYPES = Option.TYPES + ('commsep',)
    TYPE_CHECKER = copy(Option.TYPE_CHECKER)
    TYPE_CHECKER['commsep'] = check_commsep
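# The 'commsep' option type splits on commas and/or whitespace, so (with the
# hypothetical step names A and B) '--postprocsteps "A,B"' and
# '--postprocsteps "A B"' both parse to ['A', 'B'].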
parser = OptionParser(usage=__usage__, version=__version__, option_class=BeamSpotOption)
g_input = OptionGroup(parser, 'Input file options')
g_input.add_option('', '--eos', dest='eos', default=False, action='store_true', help='access files over EOS (not needed if /eos is mounted)')
g_input.add_option('-e', '--eospath', dest='eospath', default='/eos/atlas/atlastier0/rucio', help='eos path (excluding project and stream name)')
g_input.add_option('-p', '--project', dest='project', default='data17_13TeV', help='project name')
g_input.add_option('-s', '--stream', dest='stream', default='calibration_BeamSpot', help='stream name')
g_input.add_option('-f', '--filter', dest='filter', default=r'.*\.AOD\..*', help='regular expression to filter input files')
g_input.add_option('', '--lbfilemap', dest='lbfilemap', default='', help='text file with mapping between filename and lumi blocks')
g_input.add_option('', '--rucio', dest='rucio', action='store_true', default=False, help='rucio directory structure')
g_input.add_option('', '--dpdinput', dest='dpdinput', action='store_true', default=False, help='Run over DPD for runaod')
g_input.add_option('-l', '--filelist', dest='filelist', default=None, help='Explicit list of files for reproc command')
g_input.add_option('', '--removedups', dest='removedups', action='store_true', default=False, help='Remove duplicate retry files of form root.N, keeping the latest')
parser.add_option_group(g_input)
g_mode = OptionGroup(parser, 'Mode flags')
g_mode.add_option('-b', '--batch', dest='batch', action='store_true', default=False, help='batch mode - never ask for confirmation')
g_mode.add_option('', '--test', dest='testonly', action='store_true', default=False, help='for runaod, show only options and input files')
g_mode.add_option('', '--expertmode', dest='expertmode', action='store_true', default=False, help='expert mode (BE VERY CAREFUL)')
g_mode.add_option('', '--ignoremode', dest='ignoremode', default='', help='ignore update protection mode (needs expert pwd)')
parser.add_option_group(g_mode)
parser.add_option('-t', '--beamspottag', dest='beamspottag', default=beamspottag, help='beam spot tag')
parser.add_option('-d', '--dbconn', dest='dbconn', default='', help='task manager database connection string (default: check TASKDB, otherwise use sqlite_file:taskdata.db)')
parser.add_option('', '--proddir', dest='proddir', default=proddir, help='production directory (default: %s)' % proddir)
parser.add_option('', '--destdbname', dest='destdbname', default='CONDBR2', help='destination database instance name (default: CONDBR2)')
parser.add_option('', '--srcdbname', dest='srcdbname', default='BEAMSPOT', help='source database instance name (default: BEAMSPOT)')
parser.add_option('', '--srctag', dest='srctag', default='nominal', help='source tag (default: nominal)')
parser.add_option('-r', '--runjoboptions', dest='runjoboptions', default='InDetBeamSpotExample/VertexTemplate.py', help='template to run beam spot jobs')
parser.add_option('', '--runtaskname', dest='runtaskname', default='VTX', help='task name')
parser.add_option('-m', '--monjoboptions', dest='monjoboptions', default='runBeamSpotMonitor.py', help='template to run monitoring jobs')
parser.add_option('', '--bcidjoboptions', dest='bcidjoboptions', default='InDetBeamSpotExample/VertexTemplate.py', help='template to run BCID jobs')
parser.add_option('', '--montaskname', dest='montaskname', default='MON', help='task name')
parser.add_option('', '--bcidtaskname', dest='bcidtaskname', default='BCID', help='BCID task name')
parser.add_option('-g', '--griduser', dest='griduser', default='user10.JuergBeringer', help='grid user name prefix (e.g. user10.JuergBeringer)')
parser.add_option('-n', '--nowildcards', dest='nowildcards', action='store_true', default=False, help='do not add wildcards when looking up dataset and task names')
parser.add_option('-x', '--excludeiftask', dest='excludeiftask', default='', help='Exclude running cmd with runCmd if such task exists already')
parser.add_option('', '--excludeds', dest='excludeds', default='', help='Exclude running cmd with runCmd for dataset containing a certain string')
parser.add_option('', '--archive', dest='archive', action='store_true', default=False, help='archive, i.e. delete data after backup')
parser.add_option('', '--incremental', dest='incremental', action='store_true', default=False, help='incremental backup')
parser.add_option('', '--lbperjob', dest='lbperjob', type='int', default=5, help='number of luminosity blocks per job (default: 5)')
parser.add_option('', '--params', dest='params', default='', help='job option parameters to pass to job option template')
parser.add_option('-z', '--postprocsteps', dest='postprocsteps', type='commsep', default=['JobPostProcessing'], help='Task-level postprocessing steps, comma-separated (Default: JobPostProcessing)')
parser.add_option('', '--srcdqtag', dest='srcdqtag', default='nominal', help='source DQ tag (default: nominal)')
parser.add_option('', '--dqtag', dest='dqtag', default='HEAD', help='beam spot DQ tag')
parser.add_option('', '--destdqdbname', dest='destdqdbname', default='CONDBR2', help='DQ destination database instance name (default: CONDBR2)')
parser.add_option('', '--srcdqdbname', dest='srcdqdbname', default='IDBSDQ', help='DQ source database instance name (default: IDBSDQ)')
parser.add_option('', '--dslist', dest='dslist', default='', help='Exclude running cmd with runCmd if the dataset is not in this file')
parser.add_option('', '--nominbias', dest='nominbias', action='store_true', default=False, help='overwrite MinBias_physics in DSNAME with express_express')
parser.add_option('', '--resultsondisk', dest='resultsondisk', action='store_true', default=False, help='Leave the results on disk when archiving')
parser.add_option('', '--pLbFile', dest='pseudoLbFile', default=None, help='File for pseudo LB info from scan')
parser.add_option('', '--prefix', dest='prefix', default='', help='Prefix for reading files from mass storage (Default: determine from filename)')
parser.add_option('', '--rl', dest='runMin', type='int', default=None, help='Minimum run number for mctag (inclusive)')
parser.add_option('', '--ru', dest='runMax', type='int', default=None, help='Maximum run number for mctag (inclusive)')
parser.add_option('', '--useRun', dest='useRun', type='int', default=None, help='Run monitoring job for a given run only')
parser.add_option('', '--noCheckAcqFlag', dest='noCheckAcqFlag', action='store_true', default=False, help="Don't check acqFlag when submitting VdM jobs")
parser.add_option('', '--resubAll', dest='resubAll', action='store_true', default=False, help='Resubmit all jobs irrespective of status')
parser.add_option('', '--resubRunning', dest='resubRunning', action='store_true', default=False, help='Resubmit all "running" jobs')
parser.add_option('-q', '--queue', dest='batch_queue', default=None, help='Name of batch queue to use (default is context-specific)')
g_deprecated = OptionGroup(parser, 'Deprecated Options')
g_deprecated.add_option('', '--mon', dest='legacy_mon', action='store_true', default=False, help='mon directory structure (now inferred from montaskname)')
parser.add_option_group(g_deprecated)
(options, args) = parser.parse_args()
if len(args) < 1: parser.error('wrong number of command line arguments')
cmd = args[0]
cmdargs = args[1:]
if not options.expertmode:
    if os.path.realpath(os.getcwd()) != os.path.realpath(options.proddir):
        sys.exit('ERROR: You must run this command in the production directory %s' % options.proddir)
    if not os.path.exists(produserfile):
        sys.exit('ERROR: Authorization file unreadable or does not exist: %s' % produserfile)
    if not subprocess.getoutput('grep `whoami` %s' % produserfile):
        sys.exit('ERROR: You are not authorized to run this command (user name must be listed in produser file %s)' % produserfile)
else:
    if os.path.realpath(os.getcwd()) != os.path.realpath(options.proddir):
        print ('WARNING: You are not running in the production directory %s' % options.proddir)
def getT0DbConnection():
    try:
        with open(tier0dbinfofile, 'r') as dbinfofile:
            connstring = dbinfofile.read().strip()
    except:
        sys.exit('ERROR: Unable to read connection information for Tier-0 database')
    dbtype, dbname = connstring.split(':', 1)
    if dbtype != 'oracle':
        sys.exit('ERROR: Invalid T0 connection string')
    import cx_Oracle
    try:
        oracle = cx_Oracle.connect(dbname)
    except:
        print ('ERROR: First connection attempt to Tier-0 Oracle database failed; will retry in 10s ...')
        time.sleep(10)
        try:
            oracle = cx_Oracle.connect(dbname)
        except Exception as e:
            print (e)
            sys.exit('ERROR: Unable to connect to Tier-0 Oracle database')
    if not oracle:
        sys.exit('ERROR: Unable to connect to Tier-0 Oracle database (invalid cx_Oracle connection)')
    return oracle
def getTaskManager():
    ''' Open task manager (used by all following commands, at the very least through subcommands) '''
    try:
        return TaskManager(options.dbconn)
    except:
        print ('ERROR: Unable to access task manager database %s' % options.dbconn)
        sys.exit(1)

def fail(message):
    print ()
    print ('ERROR:', message)
    sys.exit(1)
def dataset_from_run_and_tag(run, tag):
    ''' Given a run number and tag, check input dataset and work out name. '''
    fs = DiskUtils.FileSet.from_ds_info(run,
            project=options.project,
            stream=options.stream,
            base=options.eospath)
    datasets = list(fs
            .use_files_from(options.filelist)
            .matching(options.filter + tag + '.*')
            .excluding(r'.*\.TMP\.log.*')
            .only_single_dataset())
    dataset = os.path.dirname(datasets[0])
    dsname = '.'.join(os.path.basename(datasets[0]).split('.')[:3])
    return (dataset, dsname)
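# Example of the name derivation above (file and run names are illustrative):
# for .../data17_13TeV.00358031.calibration_BeamSpot.recon.AOD.f956/<file>,
# dataset is the containing directory and dsname keeps the first three
# dot-separated fields, i.e. 'data17_13TeV.00358031.calibration_BeamSpot'.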
def run_jobs(script, ds_name, task_name, params, *args):
    ''' Invoke runJobs.py '''
    arg_list = ['runJobs']
    arg_list.extend(map(str, args))
    if params:
        param_args = []
        for k, v in params.items():
            param_args.append('{}={}'.format(k, repr(v)))
        arg_list.extend(['--params', ', '.join(param_args)])
    if options.testonly:
        arg_list.append('--test')
    arg_list.extend([script, ds_name, task_name])

    print (subprocess.list2cmdline(arg_list))
    subprocess.check_call(arg_list)
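# For example (illustrative arguments), run_jobs('runBeamSpotMonitor.py',
# 'data17_13TeV.00358031.calibration_BeamSpot', 'MON.VTX.f956',
# {'useBeamSpot': True}, '--lbperjob', 10) invokes:
#   runJobs --lbperjob 10 --params 'useBeamSpot=True' runBeamSpotMonitor.py \
#       data17_13TeV.00358031.calibration_BeamSpot MON.VTX.f956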
if cmd == 'upload' and len(cmdargs) == 1:
    dbfile = args[1]
    if not options.beamspottag:
        fail('No beam spot tag specified')
    try:
        with open(prodcoolpasswdfile, 'r') as passwdfile:
            passwd = passwdfile.read().strip()
    except:
        fail('Unable to determine COOL upload password')
    try:
        with open(flaskcoolpasswdfile, 'r') as flaskpasswdfile:
            flaskpasswd = flaskpasswdfile.read().strip()
    except:
        fail('Unable to determine FLASK upload password')

    print ('Beam spot file: ', dbfile)
    print ('Uploading to tag: ', options.beamspottag)
    os.system('dumpBeamSpot.py -d %s -t %s %s' % (
        options.srcdbname,
        options.srctag,
        dbfile))

    stat = os.system('/afs/cern.ch/user/a/atlcond/utilsproxy/AtlCoolMerge.py --flask --nobackup --client_id cool-flask-beamspot-client --client_secret %s --nomail %s %s --folder /Indet/Beampos --tag %s --retag %s --destdb %s %s %s ATONR_COOLOFL_GPN ATLAS_COOLOFL_INDET_W %s' % (
        flaskpasswd,
        '--batch' if options.batch else '',
        ('--ignoremode %s' % options.ignoremode) if options.ignoremode else '',
        options.srctag,
        options.beamspottag,
        options.destdbname,
        dbfile,
        options.srcdbname,
        passwd))

    if stat:
        fail("UPLOADING TO COOL FAILED - PLEASE CHECK CAREFULLY!")
    sys.exit(0)
if cmd == 'run' and len(args) == 3:
    run = args[1]
    tag = args[2]
    dataset, dsname = dataset_from_run_and_tag(run, tag)

    run_jobs(options.runjoboptions, dsname,
            '{}-LR5.{}'.format(options.runtaskname, tag),
            {
                'LumiRange' : 5,
            },
            '--files-per-job', 0,
            '--match', options.filter,
            '--exclude', r'.*\.TMP\.log.*',
            '--directory', dataset)
    sys.exit(0)
if cmd == 'runMon' and len(args) == 3:
    run = args[1]
    tag = args[2]
    if not options.beamspottag:
        fail('No beam spot tag specified')
    dataset, dsname = dataset_from_run_and_tag(run, tag)

    if 'ESD' in options.filter:
        run_jobs(options.monjoboptions, dsname,
                '{}.{}'.format(options.montaskname, tag),
                {
                    'cmdjobpreprocessing' : 'export STAGE_SVCCLASS=atlcal; export ATLAS_LOCAL_ROOT_BASE=/cvmfs/atlas.cern.ch/repo/ATLASLocalRootBase',
                    'useBeamSpot' : True,
                    'beamspottag' : options.beamspottag,
                },
                '--match', options.filter,
                '--exclude', r'.*\.TMP\.log.*',
                '--directory', dataset)
    elif options.filelist != None:
        lbinfoinfiles = True
        for line in open(options.filelist, 'r'):
            if "lb" not in line:
                lbinfoinfiles = False
                break
        lboptions = '--lbperjob=10' if lbinfoinfiles else '--files-per-job=10'
        run_jobs(options.monjoboptions, dsname, '{}.{}'.format(options.montaskname, tag),
                {
                    'tracksAlreadyOnBeamLine' : True,
                    'useBeamSpot' : True,
                    'beamspottag' : options.beamspottag,
                },
                lboptions,
                '--match', options.filter,
                '--exclude', r'.*\.TMP\.log.*',
                '--directory', dataset,
                '--queue', '"tomorrow"')
    else:
        queue = options.batch_queue or '"tomorrow"'
        print ('Queue: ', queue)

        params = {
            'cmdjobpreprocessing' : 'export STAGE_SVCCLASS=atlcal; export ATLAS_LOCAL_ROOT_BASE=/cvmfs/atlas.cern.ch/repo/ATLASLocalRootBase',
            'useBeamSpot' : True,
            'beamspottag' : options.beamspottag,
        }
        for s in options.params.split(', '):
            if s:
                try:
                    p = s.split('=', 1)
                    params[p[0].strip()] = eval(p[1].strip())
                except:
                    print ('\nERROR parsing user parameter', p, '- parameter will be ignored')
        run_jobs(options.monjoboptions, dsname, '{}.{}'.format(options.montaskname, tag), params,
                '--match', options.filter,
                '--exclude', r'.*\.TMP\.log.*',
                '--directory', dataset,
                '--queue', queue)
    sys.exit(0)
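# Two conventions used repeatedly below, noted here as reminders: '"tomorrow"'
# is a CERN HTCondor "job flavour" (roughly one day of wall-clock time), quoted
# so the name stays quoted in the generated submit file; and since os.system()
# returns the raw POSIX wait status, shifting it right by 8 bits extracts the
# child process's actual exit code.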
if cmd == 'backup' and len(args) == 2:
    if options.archive and options.incremental:
        sys.exit('ERROR: Cannot do incremental archiving')
    tmplist = glob.glob(args[1] + '/')
    dirlist = []
    for dslash in tmplist:
        d = dslash[:-1]
        if options.incremental:
            baklog = d + '/backup.log'
            if os.path.exists(baklog):
                cmd = 'find %s -maxdepth 1 -newer %s' % (d, baklog)
                (status, output) = subprocess.getstatusoutput(cmd)
                if status:
                    sys.exit("ERROR: Error executing %s" % cmd)
                if output:
                    dirlist.append(d)
            else:
                dirlist.append(d)
        else:
            dirlist.append(d)

    print ('\nFound %i directories for backup:\n' % len(dirlist))
    for d in dirlist:
        print ('  %s' % d)
    if options.archive:
        print ('\n****************************************************************')
        print ('WARNING: ARCHIVING - DIRECTORIES WILL BE DELETED AFTER BACKUP!!!')
        print ('****************************************************************')
    if not options.batch:
        a = input('\nARE YOU SURE [n] ? ')
        if a != 'y':
            sys.exit('ERROR: Aborted by user')

    for d in dirlist:
        tmpdir = '/tmp'
        if options.archive:
            outname = d.replace('/', '-') + time.strftime('-%G_%m_%d.tar.gz')
            path = archivepath
        else:
            outname = d.replace('/', '-') + '.tar.gz'
            path = backuppath
        print ('\nBacking up %s --> %s/%s ...\n' % (d, path, outname))

        status = os.system('tar czf %s/%s %s' % (tmpdir, outname, d)) >> 8
        if status:
            sys.exit('\nERROR: Unable to create local tar file %s/%s' % (tmpdir, outname))

        status = os.system('xrdcp -f %s/%s root://eosatlas.cern.ch/%s/%s' % (tmpdir, outname, path, outname)) >> 8
        if status:
            os.system('rm -rf %s/%s' % (path, outname))
            status = os.system('xrdcp -f %s/%s root://eosatlas.cern.ch/%s/%s' % (tmpdir, outname, path, outname)) >> 8
            if status:
                print ('\nERROR: Unable to copy file to EOS to %s/%s' % (path, outname))
                sys.exit(1)

        os.system('rm %s/%s' % (tmpdir, outname))
        status = os.system('echo "`date` %s/%s" >> %s/backup.log' % (path, outname, d)) >> 8
        if status:
            sys.exit('\nERROR: Could not update backup log file')

        if options.archive:
            print ('\nWARNING: DELETION OF SOURCE FILES NOT YET IMPLEMENTED\n')
    sys.exit(0)
if cmd == 'lsbackup' and not cmdargs:
    path = archivepath if options.archive else backuppath
    print ('Backup directory:', path)
    for f in DiskUtils.from_directory(path):
        print (f)
    sys.exit(0)
if cmd == 'show' and len(cmdargs) == 1:
    run = cmdargs[0]

    print ('Base path:   ', options.eospath)
    print ('Project tag: ', options.project)
    print ('Stream:      ', options.stream)
    print ()
    print ('Files available (filtered by: {}):'.format(options.filter))
    print ('---------------')

    fs = DiskUtils.FileSet.from_ds_info(run,
            project=options.project,
            stream=options.stream,
            base=options.eospath)
    for f in fs.matching(options.filter):
        print (f)

    print ()
    print ('Beam spot tasks:')
    print ('---------------')
    taskman = getTaskManager()
    for t in taskman.taskIterDict(qual=["where DSNAME like '%%%s%%' order by UPDATED" % run]):
        print ('%-45s %-45s %4s job(s), last update %s' % (t['DSNAME'], t['TASKNAME'], t['NJOBS'], time.ctime(t['UPDATED'])))
    sys.exit(0)
if cmd == 'postproc' and not cmdargs:
    taskman = getTaskManager()
    for t in taskman.taskIterDict(
            qual=['where STATUS > %i and STATUS <= %i order by UPDATED' % (
                TaskManager.StatusCodes['RUNNING'],
                TaskManager.StatusCodes['POSTPROCESSING'])]):
        doPostProcessing(taskman, t, t['TASKPOSTPROCSTEPS'].split(), BeamSpotPostProcessing)
    sys.exit(0)

if cmd == 'postproc' and len(cmdargs) in [2, 3]:
    dsname = cmdargs[0]
    taskname = cmdargs[1]

    steps = options.postprocsteps if len(cmdargs) < 3 else cmdargs[2].split(',')
    if steps:
        print ('Executing postprocessing tasks:', steps)
    else:
        print ('Executing postprocessing tasks as specified in task database')
    print ()

    taskman = getTaskManager()
    try:
        taskList = getFullTaskNames(taskman, dsname, taskname,
                confirmWithUser=not options.batch,
                addWildCards=not options.nowildcards)
    except TaskManagerCheckError as e:
        print (e)
        sys.exit(1)
    for taskName in taskList:
        t = taskman.getTaskDict(taskName[0], taskName[1])
        if steps:
            doPostProcessing(taskman, t, steps, BeamSpotPostProcessing, forceRun=True)
        else:
            doPostProcessing(taskman, t, t['TASKPOSTPROCSTEPS'].split(), BeamSpotPostProcessing, forceRun=True)
    sys.exit(0)
if cmd == 'upload' and len(args) == 3:
    taskman = getTaskManager()
    try:
        [(dsname,task)] = getFullTaskNames(taskman,args[1],args[2],requireSingleTask=True,confirmWithUser=not options.batch,addWildCards=not options.nowildcards)
    except TaskManagerCheckError as e:
        print (e)
        sys.exit(1)
    if not options.beamspottag:
        sys.exit('ERROR: No beam spot tag specified')

    dbfile = glob.glob('%s/%s/*-beamspot.db' % (dsname,task))
    if len(dbfile) != 1:
        print ('ERROR: Missing or ambiguous input COOL file:',dbfile)
        sys.exit()

    try:
        with open(prodcoolpasswdfile, 'r') as passwdfile:
            passwd = passwdfile.read().strip()
    except:
        sys.exit('ERROR: Unable to determine COOL upload password')

    try:
        with open(flaskcoolpasswdfile, 'r') as flaskpasswdfile:
            flaskpasswd = flaskpasswdfile.read().strip()
    except:
        fail('Unable to determine FLASK upload password')

    print ('\nData set:         ', dsname)
    print ('Beam spot file:   ', dbfile[0])
    print ('Uploading to tag: ', options.beamspottag)
    os.system('dumpBeamSpot.py -d %s -t %s %s' % (options.srcdbname, options.srctag, dbfile[0]))

    if options.ignoremode:
        ignoremode = '--passopt="--appendlocked --ignoremode %s"' % options.ignoremode
    else:
        ignoremode = ''
    if options.batch:
        batchmode = '--batch'
    else:
        batchmode = ''

    print ('command: /afs/cern.ch/user/a/atlcond/utilsproxy/AtlCoolMerge.py --flask --nobackup --client_id cool-flask-beamspot-client --client_secret <flaskpassword> --nomail %s %s --folder /Indet/Beampos --tag %s --retag %s --destdb %s %s %s ATONR_COOLOFL_GPN ATLAS_COOLOFL_INDET_W <password>' % (batchmode,ignoremode,options.srctag,options.beamspottag,options.destdbname,dbfile[0],options.srcdbname))
    stat = os.system('/afs/cern.ch/user/a/atlcond/utilsproxy/AtlCoolMerge.py --flask --nobackup --client_id cool-flask-beamspot-client --client_secret %s --nomail %s %s --folder /Indet/Beampos --tag %s --retag %s --destdb %s %s %s ATONR_COOLOFL_GPN ATLAS_COOLOFL_INDET_W %s' % (flaskpasswd,batchmode,ignoremode,options.srctag,options.beamspottag,options.destdbname,dbfile[0],options.srcdbname,passwd))

    if stat:
        print ("\n\nERROR: UPLOADING TO COOL FAILED - PLEASE CHECK CAREFULLY!\n\n")
        sys.exit(1)

    uploadflag = dbfile[0] + '.uploaded'
    stat = os.system('echo "`date` %s" >> %s' % (options.beamspottag, uploadflag))
    if stat:
        print ("ERROR: Uploading was successful, but unable to set upload flag", uploadflag)

    cooltags = options.beamspottag

    try:
        nextbeamspot = COOLUtils.resolveNextBeamSpotFolder()
    except:
        nextbeamspot = ''
    if nextbeamspot == options.beamspottag:
        nextbeamspot = ''
    if not ('UPD' in options.beamspottag and 'UPD' in nextbeamspot):
        nextbeamspot = ''
    if nextbeamspot != '':
        print ('Additionally uploading to Next tag: ', nextbeamspot)
        cooltags = appendUnique(cooltags, nextbeamspot)

    t = taskman.getTaskDict(dsname, task)
    taskman.setValue(dsname, task, 'COOLTAGS', appendUnique(t['COOLTAGS'], cooltags))
    sys.exit(0)
if cmd == 'dq2get' and len(args) == 3:
    taskman = getTaskManager()
    try:
        [(dsname,task)] = getFullTaskNames(taskman,args[1],args[2],requireSingleTask=True,addWildCards=not options.nowildcards)
    except TaskManagerCheckError as e:
        print (e)
        sys.exit(1)
    dir = os.path.join(dsname, task)
    griddsname = '%s.%s-%s' % (options.griduser, dsname, task)
    path = os.path.join(dir, griddsname)
    if os.path.exists(path):
        print ('ERROR: Path exists already:', path)
        sys.exit(1)
    print ('Extracting into %s/%s ...' % (dir, griddsname))
    stat = os.system('cd %s; dq2-get -T 6,6 %s' % (dir, griddsname))
    if stat:
        print ("ERROR: Problem occurred while extracting data set - task status not changed")
        sys.exit(1)

    statfile = glob.glob('%s/000/*.status.SUBMITTED' % (dir))
    if len(statfile) != 1:
        print ("ERROR: Unable to uniquely identifying status - giving up. Candidate status files are:")
        print (statfile)
        sys.exit(1)
    os.system('rm %s' % statfile[0])
    basename = statfile[0][:-17]
    os.system('touch %s.exit.0' % basename)
    os.system('touch %s.COMPLETED' % basename)
    print ("\nSuccessfully downloaded data set", griddsname)
    os.system('du -hs %s' % path)
    sys.exit(0)
if cmd == 'queryT0' and len(args) == 3:
    taskman = getTaskManager()
    try:
        [(dsname,task)] = getFullTaskNames(taskman,args[1],args[2],requireSingleTask=True,addWildCards=not options.nowildcards)
    except TaskManagerCheckError as e:
        print (e)
        sys.exit(1)
    tags = task.split('.')[-1]

    if 'ESD' in options.filter:
        t0TaskName = '%s.recon.ESD.%s.beamspotproc.task' % (dsname, tags)
    else:
        if any(t[0] == 'm' for t in tags.split('_')):
            t0TaskName = '%s.merge.AOD.%s.beamspotproc.task' % (dsname, tags)
        else:
            t0TaskName = '%s.recon.AOD.%s.beamspotproc.task' % (dsname, tags)

    print ('Querying Tier-0 database for task', t0TaskName, '...')
    oracle = getT0DbConnection()
    cur = oracle.cursor()
    try:
        sql = str("SELECT status FROM tasks WHERE taskname='%s' AND tasktype='beamspotproc'" % t0TaskName)
        cur.execute(sql)
        r = cur.fetchall()
    except Exception as e:
        print (e)
        sys.exit('ERROR: Unable to retrieve status of task %s' % t0TaskName)
    if len(r) == 0:
        sys.exit('ERROR: No such task found: %s' % t0TaskName)
    if len(r) > 1:
        sys.exit('ERROR: %i tasks found - unable to determine task status' % len(r))
    status = r[0][0]
    print ('\nTask status = %s\n' % status)
    if status == 'FINISHED' or status == 'TRUNCATED':
        sys.exit(0)
    else:
        sys.exit(1)
if cmd == 'runCmd' and len(args) == 4:
    dssel = args[1]
    tasksel = args[2]
    cmd = args[3]
    if options.nowildcards:
        qual = ["where DSNAME like '%s' and TASKNAME like '%s'" % (dssel, tasksel)]
    else:
        qual = ["where DSNAME like '%%%s%%' and TASKNAME like '%%%s%%'" % (dssel, tasksel)]
    if options.beamspottag:
        qual.append("and COOLTAGS like '%%%s%%'" % options.beamspottag)
    qual.append("order by RUNNR")

    print ('Running command\n')
    print ('   ', cmd % ({'DSNAME': 'DSNAME', 'TASKNAME': 'TASKNAME', 'RUNNR': 'RUNNR', 'FULLDSNAME': 'FULLDSNAME'}))
    print ('\nover the following datasets / tasks:\n')
    print ('    %-10s  %-40s  %s' % ('Run', 'Dataset', 'Task'))
    print ('    %s' % (74 * '-'))

    taskList = []
    taskman = getTaskManager()
    for t in taskman.taskIterDict('*', qual):
        dsname = t['DSNAME']

        if options.nominbias and 'physics_MinBias' in dsname:
            print ('Warning: changing physics_MinBias in dsname to express_express')
            dsname = dsname.replace('physics_MinBias', 'express_express')
            t['DSNAME'] = dsname

        runnr = t['RUNNR']
        taskname = t['TASKNAME']
        print ('    %-10s  %-40s  %s' % (runnr, dsname, taskname))

        if options.excludeiftask:
            if options.nowildcards:
                n = taskman.getNTasks(["where DSNAME ='%s' and TASKNAME like '%s'" % (dsname, options.excludeiftask)])
            else:
                n = taskman.getNTasks(["where DSNAME ='%s' and TASKNAME like '%%%s%%'" % (dsname, options.excludeiftask)])
            if n != 0:
                print ('        ==> SKIPPING - %i matching tasks found' % n)
                continue

        if options.excludeds:
            excludeList = options.excludeds.split(',')
            if any(s in dsname for s in excludeList):
                print ('        ==> SKIPPING dataset')
                continue

        if options.dslist:
            stat, out = subprocess.getstatusoutput('grep -c %s %s' % (dsname, options.dslist))
            if stat != 0:
                print ('        ==> SKIPPING - dataset %s not found in %s' % (dsname, options.dslist))
                continue

            stat, out = subprocess.getstatusoutput('grep %s %s' % (dsname, options.dslist))
            if stat == 0:
                t['FULLDSNAME'] = out.strip()

        taskList.append(t)

    if not taskList:
        print ('\nNo jobs need to be run.\n')
        sys.exit(0)

    print ('\n%i datasets / tasks selected.\n' % len(taskList))
    if not options.batch:
        a = input('\nARE YOU SURE [n] ? ')
        if a != 'y':
            sys.exit('ERROR: Running of monitoring tasks aborted by user')
        print ()
    for t in taskList:
        fullcmd = cmd % t
        print ('\nExecuting: ', fullcmd, ' ...')
        os.system(fullcmd)

    sys.exit(0)
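# Example of the command templating above (dataset/task patterns hypothetical):
#   beamspotman.py runCmd '%13TeV%' 'VTX' 'beamspotman.py postproc %(DSNAME)s %(TASKNAME)s'
# runs the quoted command once per matching task, with the dataset and task
# names substituted via 'cmd % t'.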
if cmd == 'runMonJobs' and len(args) < 3:
    if len(args) == 2:
        earliestUpdateTime = time.time() - float(args[1])
    else:
        earliestUpdateTime = 0
    if not options.beamspottag:
        sys.exit('ERROR: No beam spot tag specified')

    print ('Running the following monitoring tasks for tasks of type %s:\n' % options.runtaskname)
    print ('    %-10s  %s' % ('RUN', 'MONITORING TASK'))
    print ('    %s' % (60 * '-'))

    onDiskCode = TaskManager.OnDiskCodes['ALLONDISK']
    taskList = []
    taskman = getTaskManager()
    for t in taskman.taskIterDict('*', ["where TASKNAME like '%s%%'" % options.runtaskname,
                                        "and UPDATED >= ", DbParam(earliestUpdateTime),
                                        "and ONDISK = ", DbParam(onDiskCode),
                                        "and COOLTAGS like '%%%s%%'" % options.beamspottag,
                                        "order by RUNNR desc"]):
        dsname = t['DSNAME']
        runnr = t['RUNNR']
        taskName = t['TASKNAME']
        datatag = taskName.split('.')[-1].split('_')[0]
        monTaskName = 'MON.%s.%s' % (taskName, datatag)

        useRun = True
        if options.useRun is not None:
            useRun = False
            if runnr == options.useRun:
                useRun = True

        if useRun:
            try:
                m = next(taskman.taskIterDict('*', ["where RUNNR =", DbParam(runnr), "and DSNAME =", DbParam(dsname), "and TASKNAME =", DbParam(monTaskName), "order by UPDATED desc"]))
                print ('    %-10s  %s  %s' % (runnr, dsname, monTaskName))
            except:
                print ('  * %-10s  %s  %s' % (runnr, dsname, '--- no monitoring task found ---'))
                taskList.append(t)

    if not taskList:
        print ('\nNo jobs need to be run.\n')
        sys.exit(0)

    if not options.batch:
        a = input('\nARE YOU SURE [n] ? ')
        if a != 'y':
            sys.exit('ERROR: Running of monitoring tasks aborted by user')
        print ()

    oracle = getT0DbConnection()
    for t in taskList:
        dsname = t['DSNAME']
        runnr = t['RUNNR']
        ptag = dsname.split('.')[0]
        stream = dsname.split('.')[2]
        taskName = t['TASKNAME']
        fulldatatag = taskName.split('.')[-1]
        datatag = fulldatatag.split('_')[0]
        monTaskName = 'MON.%s' % (taskName)

        cooltags = t['COOLTAGS']
        if not cooltags: cooltags = ''
        bstag = cooltags.split()[0]

        filter = 'AOD'
        if any(t[0] == 'm' for t in fulldatatag.split('_')):
            t0dsname = '%s.merge.AOD.%s%%' % (dsname, datatag)
        else:
            t0dsname = '%s.recon.AOD.%s%%' % (dsname, datatag)

        c = getJobConfig('%s/%s' % (dsname, taskName))
        if 'ESD' in c['inputfiles'][0]:
            filter = 'ESD'
            t0dsname = '%s.recon.ESD.%s' % (dsname, datatag)

        print ('\nRunning monitoring job for run %s:' % runnr)

        eospath = options.eospath
        if int(runnr) < 240000:
            print ('... Querying T0 database for replication of %s' % t0dsname)
            cur = oracle.cursor()
            cur.execute("select DATASETNAME,PSTATES from DATASET where DATASETNAME like '%s' and PSTATES like '%%replicate:done%%'" % t0dsname)
            r = cur.fetchall()
            if not r:
                print ('    WARNING: input data not yet replicated - please retry later')
                continue
        else:
            print ('... Querying T0 database for completion of merging jobs of %s' % t0dsname)
            cur = oracle.cursor()
            origt0TaskName = '%s.recon.AOD.%s%%.aodmerge.task' % (dsname, datatag)
            cur.execute("select status from tasks where taskname like '%s' and tasktype='aodmerge'" % origt0TaskName)
            r = cur.fetchall()
            if not r:
                print ("    WARNING: can't get status of merge job for %s, running on un-merged samples instead" % origt0TaskName)
                eospath = '/eos/atlas/atlastier0/tzero/prod'
            elif not (r[0][0] == 'FINISHED' or r[0][0] == 'TRUNCATED'):
                print ('    Merge job for taskname %s is not finished yet, has status %s, running on un-merged samples instead.' % (origt0TaskName, r[0][0]))
                eospath = '/eos/atlas/atlastier0/tzero/prod'
            else:
                print ('    Merge job is finished, launching jobs.')

        print ('... Submitting monitoring task')
        queue = options.batch_queue or '\'"tomorrow"\''
        paramValues = ''
        if options.params:
            paramValues = '--params \'' + options.params + '\''
        testFlag = ''
        if options.testonly:
            testFlag = '--test'

        cmd = 'beamspotman --eospath=%s -p %s -s %s -f \'.*\\.%s\\..*\' -t %s --queue %s %s %s --montaskname %s runMon %i %s' % (eospath, ptag, stream, filter, bstag, queue, paramValues, testFlag, monTaskName, int(runnr), datatag)
        print (cmd)
        status = os.system(cmd) >> 8
        if status:
            print ('\nERROR: Job submission returned error - exit code %i\n' % status)

    sys.exit(0)
if cmd == 'archive' and len(args) == 3:
    if not options.batch:
        print ('\nWARNING: If you confirm below, each of the following datasets will:')
        print ('         - be archived to EOS')
        if options.resultsondisk:
            print ('         - will be marked as RESULTSONDISK in the task database')
            print ('         - all except the results files *** WILL BE DELETED ***')
        else:
            print ('         - will be marked as ARCHIVED in the task database')
            print ('         - all its files *** WILL BE DELETED ***')
        print ()

    taskman = getTaskManager()
    try:
        taskList = getFullTaskNames(taskman,args[1],args[2],confirmWithUser=not options.batch,addWildCards=not options.nowildcards)
    except TaskManagerCheckError as e:
        print (e)
        sys.exit(1)

    tmpdir = '/tmp'
    path = archivepath
    onDiskCode = TaskManager.OnDiskCodes.get('ALLONDISK', None)
    archivedCode = TaskManager.OnDiskCodes.get('RESULTSONDISK', None) if options.resultsondisk else TaskManager.OnDiskCodes.get('ARCHIVED', None)
    exceptList = ['*dqflags.txt', '*.gif', '*.pdf', '*.config.py*', '*.argdict.gpickle', '*.AveBeamSpot.log', '*.PlotBeamSpotCompareReproc.log', '*.sh', '*.BeamSpotNt*', '*.BeamSpotGlobalNt.log', '*.status.*', '*.exit.*']

    for (dsname, taskname) in taskList:
        t = taskman.getTaskDict(dsname, taskname)

        if t['ONDISK'] != onDiskCode:
            print ('Skipping task %s / %s status %s (task files must be on disk)' % (dsname, taskname, getKey(TaskManager.OnDiskCodes, t['ONDISK'])))
            continue

        dir = '%s/%s' % (dsname, taskname)
        outname = dir.replace('/', '-') + time.strftime('-%G_%m_%d.tar.gz')
        print ('Archiving task %s / %s ...' % (dsname, taskname))
        print ('  --> %s/%s ...' % (path, outname))

        if dir == '.' or dir == '*':
            print ('\n**** FATAL ERROR: Very dangerous value of task directory found: %s - ABORTING' % dir)
            sys.exit(1)

        if os.path.exists(dir):
            status = os.system('tar czf %s/%s %s' % (tmpdir, outname, dir)) >> 8
            if status:
                sys.exit('\n**** ERROR: Unable to create local tar file %s/%s' % (tmpdir, outname))

            status = os.system('xrdcp %s/%s root://eosatlas.cern.ch/%s/%s' % (tmpdir, outname, path, outname)) >> 8
            if status:
                os.system('rm -rf %s/%s' % (path, outname))
                status = os.system('xrdcp -f %s/%s root://eosatlas.cern.ch/%s/%s' % (tmpdir, outname, path, outname)) >> 8
                if status:
                    sys.exit('\n**** ERROR: Unable to copy file to EOS to %s/%s' % (path, outname))

            os.system('rm %s/%s' % (tmpdir, outname))
            n = taskman.setValue(dsname, taskname, 'ONDISK', archivedCode)
            if n != 1:
                sys.exit('\n**** ERROR: Unexpected number of tasks modified: %i instead of 1 (DSNAME=%s,TASKNAME=%s)' % (n, dsname, taskname))

            if options.resultsondisk:
                oscmd = r"find %s ! \( -name '%s' \) -type f -exec rm {} \;" % (dir, "' -or -name '".join(exceptList))
                os.system(oscmd)
            else:
                os.system('rm -rf %s' % dir)
        else:
            print ('\n**** ERROR: No task directory', dir, '\n')

    sys.exit(0)
if cmd == 'resubmit' and len(args) in [3, 4]:
    dsname = args[1]
    taskname = args[2]
    queue = args[3] if len(args) == 4 else options.batch_queue
    if not queue:
        print ('ERROR: No queue was specified (use -q)')
        sys.exit(1)

    taskman = getTaskManager()
    basepath = os.path.join(os.getcwd(), dsname, taskname)
    dircontents = os.listdir(basepath)

    condorScriptTemplate = """executable            = %(scriptfile)s
arguments             = $(ClusterID) $(ProcId)
output                = %(logfile)s.out
error                 = %(logfile)s.err
log                   = %(logfile)s.log
universe              = vanilla
+JobFlavour           = %(batchqueue)s
queue
"""

    for dir in dircontents:
        if not os.path.isdir(os.path.join(basepath, dir)):
            continue
        if (options.montaskname in taskname.split('.')) or options.legacy_mon:
            jobname = '-'.join([dsname, taskname, 'lb' + dir])
        else:
            jobname = dir
        fullpath = os.path.join(basepath, dir)

        isRunning = False
        isFailed = False
        for f in os.listdir(fullpath):
            if re.search('RUNNING', f):
                isRunning = True
            if re.search('COMPLETED', f) or re.search('POSTPROCESSING', f):
                with open(os.path.join(fullpath, jobname + '.exitstatus.dat')) as statusFile:
                    status = statusFile.read(1)
                    if status != '0':
                        isFailed = True

        if options.resubRunning and isRunning:
            print ("Will resubmit running job")
        elif (isRunning or not isFailed) and not options.resubAll:
            continue

        for f in os.listdir(fullpath):
            if re.search('.exitstatus.', f):
                os.remove(os.path.join(fullpath, f))
            elif re.search('.exit.', f):
                os.remove(os.path.join(fullpath, f))
            elif re.search('.status.', f):
                os.remove(os.path.join(fullpath, f))
            elif re.search('.log', f):
                os.remove(os.path.join(fullpath, f))
            elif re.search('.py.final.py', f):
                os.remove(os.path.join(fullpath, f))

        jobConfig = {
            'batchqueue': queue,
            'jobname': jobname,
            'jobdir': fullpath,
        }
        jobConfig['logfile'] = '%(jobdir)s/%(jobname)s.log' % jobConfig
        jobConfig['scriptfile'] = '%(jobdir)s/%(jobname)s.sh' % jobConfig

        condorScript = condorScriptTemplate % jobConfig
        print (condorScript)
        script = open('condorSubmit.sub', 'w')
        script.write(condorScript)
        script.close()
        os.chmod('condorSubmit.sub', 0o755)
        batchCmd = 'condor_submit condorSubmit.sub'
        print (batchCmd)
        os.system(batchCmd)

    print (taskman.getStatus(dsname, taskname))
    taskman.setStatus(dsname, taskname, TaskManager.StatusCodes['RUNNING'])
    sys.exit(0)
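# For illustration, with queue '"tomorrow"' and a hypothetical job name JOB in
# directory DIR, the rendered condorSubmit.sub reads:
#   executable = DIR/JOB.sh
#   arguments  = $(ClusterID) $(ProcId)
#   output     = DIR/JOB.log.out
#   error      = DIR/JOB.log.err
#   log        = DIR/JOB.log.log
#   universe   = vanilla
#   +JobFlavour = "tomorrow"
#   queue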
if cmd == 'reproc' and len(args) == 5:
    from InDetBeamSpotExample import HTCondorJobRunner

    jobopts = args[1]
    dsname = args[2]
    taskname = args[3]
    inputdata = args[4]

    lbperjob = options.lbperjob
    params = {'LumiRange': lbperjob}
    cmd = ' '.join(sys.argv)

    for s in options.params.split(', '):
        if s:
            try:
                p = s.split('=', 1)
                params[p[0].strip()] = eval(p[1].strip())
            except:
                print ('\nERROR parsing user parameter', p, '- parameter will be ignored')

    backend = DiskUtils.EOS() if options.eos else None
    fs = DiskUtils.FileSet.from_input(inputdata, backend=backend)
    fs = fs.matching(options.filter).use_files_from(options.filelist).only_latest(options.removedups)
    lbMap = {}
    files = []
    for f, lbs in fs.with_lumi_blocks(options.lbfilemap):
        lbMap[f.split('/')[-1]] = lbs
        files.append(f)
    if not files: fail('No files were found.')

    jobFileDict = {}
    jobLBDict = {}

    if lbperjob == -1:
        jobId = 1
        jobFileDict[jobId] = files
        jobLBDict[jobId] = []
        for f in files:
            try:
                lbs = sorted(lbMap[f.split('/')[-1]])
            except KeyError:
                sys.exit('No mapping for file %s' % f.split('/')[-1])
            jobLBDict[jobId].extend(lbs)
    else:
        for f in files:
            try:
                lbs = sorted(lbMap[f.split('/')[-1]])
            except KeyError:
                sys.exit('No mapping for file %s' % f.split('/')[-1])

            for lbnr in lbs:
                jobId = int((lbnr - 1) / lbperjob)
                if not jobId in jobFileDict:
                    jobFileDict[jobId] = [f]
                    jobLBDict[jobId] = [lbnr]
                else:
                    if not f in jobFileDict[jobId]:
                        jobFileDict[jobId].append(f)
                    jobLBDict[jobId].append(lbnr)

    taskman = getTaskManager()
    for i in sorted(jobFileDict.keys()):
        jobnr = i * lbperjob + 1
        files = jobFileDict[i]
        lbs = sorted(set(jobLBDict[i]))

        intlbs = []
        for lbnr in lbs:
            intlbs.append(int(lbnr))
        params['lbList'] = intlbs
        jobname = dsname + '-' + taskname + '-lb%03i' % jobnr

        queue = options.batch_queue or '"tomorrow"'
        runner = HTCondorJobRunner.HTCondorJobRunner(
                jobnr=jobnr,
                jobdir=os.path.join(os.getcwd(), dsname, taskname, jobname),
                jobname=jobname,
                inputds='',
                inputfiles=files,
                joboptionpath=jobopts,
                filesperjob=len(files),
                batchqueue=queue,
                addinputtopoolcatalog=True,
                taskpostprocsteps='ReprocVertexDefaultProcessing',
                autoconfparams='DetDescrVersion',
                returnstatuscode=True,
                comment=cmd,
                params=params)
        try:
            runner.configure()
        except Exception as e:
            print ("ERROR: Unable to configure JobRunner job - perhaps same job was already configured / run before?")
            print ("DEBUG: Exception =", e)
            sys.exit(1)
        taskman.addTask(dsname, taskname, jobopts, runner.getParam('release'), runner.getNJobs(), runner.getParam('taskpostprocsteps'), comment=cmd)
        runner.run()
    sys.exit(0)
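# Example of the LB bunching above (numbers illustrative): with the default
# lbperjob=5, LBs 1-5 fall into jobId 0 and LBs 6-10 into jobId 1, since
# jobId = int((lbnr-1)/lbperjob); the corresponding job names end in lb001 and
# lb006 respectively, from jobnr = jobId*lbperjob + 1.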
if cmd == 'runaod' and len(args) == 5:
    from InDetBeamSpotExample import HTCondorJobRunner

    jobopts = args[1]
    dsname = args[2]
    taskname = args[3]
    inputdata = args[4]

    lbperjob = options.lbperjob
    params = {'LumiRange': lbperjob}

    if options.pseudoLbFile:
        params = {'LumiRange': 1}

    cmd = ' '.join(sys.argv)

    for s in options.params.split(', '):
        if s:
            try:
                p = s.split('=', 1)
                params[p[0].strip()] = eval(p[1].strip())
            except:
                print ('\nERROR parsing user parameter', p, '- parameter will be ignored')

    backend = DiskUtils.EOS() if options.eos else None
    fs = DiskUtils.FileSet.from_input(inputdata, backend=backend)
    print ("****************************************************")
    print ("*************** printing files *********************")
    fs = fs.matching(options.filter)
    lbMap = {}
    files = []
    for f, lbs in fs.with_lumi_blocks(options.lbfilemap):
        print (f)
        lbMap[f.split('/')[-1]] = lbs
        files.append(f)
    if not files: fail('No files were found.')

    jobFileDict = {}
    jobLBDict = {}
    jobParams = {}

    if options.pseudoLbFile:
        from InDetBeamSpotExample.COOLUtils import COOLQuery
        coolQuery = COOLQuery()
        from InDetBeamSpotExample.Utils import getRunFromName
        lbTimes = coolQuery.getLbTimes(getRunFromName(dsname, None, True))

        with open(options.pseudoLbFile) as pLbFile:
            for line in pLbFile:
                if line[0] == '#': continue

                tokens = line.split()
                plbnr, tstart, tend = int(tokens[0]), int(tokens[1]), int(tokens[2])
                jobId = int(plbnr / lbperjob)

                if not options.noCheckAcqFlag and len(tokens) >= 5 and abs(float(tokens[4]) - 1.) > 0.001:
                    print ("Point is not stationary -- skipping job %d" % jobId)
                    continue

                rlbs = [lb for (lb, time) in lbTimes.items() if (time[0] - tend/1e9)*(time[1] - tstart/1e9) < 0]

                filenames = []
                for f in files:
                    try:
                        lbs = sorted(lbMap[f.split('/')[-1]])
                    except KeyError:
                        sys.exit('No mapping for file %s' % f.split('/')[-1])
                    if not sum([lb for lb in lbs if lb in rlbs]): continue
                    filenames.append(f)

                if jobId in jobFileDict:
                    jobLBDict[jobId].extend([lb for lb in rlbs if not lb in jobLBDict[jobId]])
                    jobFileDict[jobId].extend([f for f in filenames if not f in jobFileDict[jobId]])
                    jobParams[jobId]['lbData'].append(line.strip('\n').strip())
                else:
                    jobLBDict[jobId] = rlbs
                    jobFileDict[jobId] = filenames
                    jobParams[jobId] = {'lbData': [line.strip('\n').strip()]}
    else:
        for f in files:
            try:
                lbs = sorted(lbMap[f.split('/')[-1]])
            except KeyError:
                print ('WARNING: No mapping for file %s. Skipping' % f.split('/')[-1])
                continue

            for lbnr in lbs:
                jobId = int((lbnr - 1) / lbperjob)
                if not jobId in jobFileDict:
                    jobFileDict[jobId] = [f]
                    jobLBDict[jobId] = [lbnr]
                else:
                    if not f in jobFileDict[jobId]:
                        jobFileDict[jobId].append(f)
                    jobLBDict[jobId].append(lbnr)

    taskman = getTaskManager()
    for i in sorted(jobFileDict.keys()):
        jobnr = i * lbperjob + 1
        files = jobFileDict[i]
        lbs = sorted(set(jobLBDict[i]))

        intlbs = []
        for lbnr in lbs:
            intlbs.append(int(lbnr))
        params['lbList'] = intlbs

        p = jobParams[i] if i in jobParams else {}
        for k, v in p.items(): params[k] = v

        jobname = dsname + '-' + taskname + '-lb%03i' % jobnr

        queue = options.batch_queue
        if queue is None:
            queue = '"tomorrow"' if options.pseudoLbFile else '"tomorrow"'
        runner = HTCondorJobRunner.HTCondorJobRunner(
                jobnr=jobnr,
                jobdir=os.path.join(os.getcwd(), dsname, taskname, jobname),
                jobname=jobname,
                inputds='',
                inputfiles=files,
                joboptionpath=jobopts,
                filesperjob=len(files),
                batchqueue=queue,
                addinputtopoolcatalog=True,
                taskpostprocsteps=' '.join(options.postprocsteps),
                autoconfparams='DetDescrVersion',
                returnstatuscode=True,
                comment=cmd,
                params=params)
        if options.testonly:
            runner.showParams()
        else:
            try:
                runner.configure()
            except Exception as e:
                print ("ERROR: Unable to configure JobRunner job - perhaps same job was already configured / run before?")
                print ("DEBUG: Exception =", e)
                sys.exit(1)
            taskman.addTask(dsname, taskname, jobopts, runner.getParam('release'), runner.getNJobs(), runner.getParam('taskpostprocsteps'), comment=cmd)
            runner.run()
    sys.exit(0)
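# The pseudo-LB file parsed above is assumed to hold whitespace-separated
# columns per scan point, '<plb> <tstart> <tend> ... <acqFlag>', with times in
# nanoseconds (they are divided by 1e9 before comparison with the LB start/stop
# times) and lines starting with '#' treated as comments.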
if cmd == 'dqflag' and len(args) == 2:
    dbfile = args[1]
    if not options.dqtag:
        sys.exit('ERROR: No beamspot DQ tag specified')
    try:
        with open(proddqcoolpasswdfile, 'r') as passwdfile:
            passwd = passwdfile.read().strip()
    except:
        sys.exit('ERROR: Unable to determine DQ COOL upload password')

    print ('\nBeam spot DQ file: ', dbfile)
    print ('Uploading to tag:  ', options.dqtag)

    if options.ignoremode:
        ignoremode = '--ignoremode %s' % options.ignoremode
    else:
        ignoremode = ''
    if options.batch:
        batchmode = '--batch'
    else:
        batchmode = ''

    cmd = 'dq_defect_copy_defect_database.py --intag %s --outtag %s "sqlite://;schema=%s;dbname=%s" "oracle://ATLAS_COOLWRITE;schema=ATLAS_COOLOFL_GLOBAL;dbname=%s;"' % (options.srcdqtag, options.dqtag, dbfile, options.srcdqdbname, options.destdqdbname)
    print ("Running %s" % cmd)
    stat = os.system(cmd)

    if stat:
        print ("\n\nERROR: UPLOADING DQ FLAG TO COOL FAILED - PLEASE CHECK CAREFULLY!\n\n")
        sys.exit(1)
    sys.exit(0)
if cmd == 'dqflag' and len(args) == 3:
    taskman = getTaskManager()
    try:
        [(dsname,task)] = getFullTaskNames(taskman,args[1],args[2],requireSingleTask=True,confirmWithUser=not options.batch,addWildCards=not options.nowildcards)
    except TaskManagerCheckError as e:
        print (e)
        sys.exit(1)

    if not options.dqtag:
        sys.exit('ERROR: No beam spot DQ tag specified')

    dbfile = glob.glob('%s/%s/*-dqflags.db' % (dsname, task))
    if len(dbfile) != 1:
        print ('ERROR: Missing or ambiguous input COOL DQ file:', dbfile)
        sys.exit()

    try:
        with open(proddqcoolpasswdfile, 'r') as passwdfile:
            passwd = passwdfile.read().strip()
    except:
        sys.exit('ERROR: Unable to determine DQ COOL upload password')

    print ('\nData set:          ', dsname)
    print ('Beam spot DQ file: ', dbfile[0])
    print ('Uploading to tag:  ', options.dqtag)

    if options.ignoremode:
        ignoremode = '--ignoremode %s' % options.ignoremode
    else:
        ignoremode = ''
    if options.batch:
        batchmode = '--batch'
    else:
        batchmode = ''

    cmd = 'dq_defect_copy_defect_database.py --intag %s --outtag %s "sqlite://;schema=%s;dbname=%s" "oracle://ATLAS_COOLWRITE;schema=ATLAS_COOLOFL_GLOBAL;dbname=%s;"' % (options.srcdqtag, options.dqtag, dbfile[0], options.srcdqdbname, options.destdqdbname)
    print ("Running %s" % cmd)
    stat = os.system(cmd)

    if stat:
        print ("\n\nERROR: UPLOADING DQ FLAG TO COOL FAILED - PLEASE CHECK CAREFULLY!\n\n")
        sys.exit(1)

    uploadflag = dbfile[0] + '.uploaded'
    stat = os.system('echo "`date` %s" >> %s' % (options.dqtag, uploadflag))
    if stat:
        print ("ERROR: Uploading DQ flag was successful, but unable to set upload flag", uploadflag)
    sys.exit(0)
if cmd == 'runBCID' and len(args) == 3:
    run = args[1]
    tag = args[2]
    dataset, dsname = dataset_from_run_and_tag(run, tag)

    run_jobs(options.bcidjoboptions, dsname, options.bcidtaskname,
            {
                'cmdjobpreprocessing' : 'export STAGE_SVCCLASS=atlcal; export ATLAS_LOCAL_ROOT_BASE=/cvmfs/atlas.cern.ch/repo/ATLASLocalRootBase',
                'SeparateByBCID' : True,
                'VertexNtuple' : False,
            },
            '--files-per-job', 0,
            '--match', options.filter,
            '--exclude', r'.*\.TMP\.log.*',
            '--postprocsteps', 'BCIDDefaultProcessing')
    sys.exit(0)
if cmd == 'runBCIDJobs' and len(args) < 3:
    if len(args) == 2:
        earliestUpdateTime = time.time() - float(args[1])
    else:
        earliestUpdateTime = 0

    if not options.beamspottag:
        sys.exit('ERROR: No beam spot tag specified')

    print ('Running the following BCID tasks for tasks of type %s:\n' % options.runtaskname)
    print ('    %-10s  %s' % ('RUN', 'BCID TASK'))
    print ('    %s' % (60 * '-'))

    taskList = []
    taskman = getTaskManager()
    for t in taskman.taskIterDict('*', ["where TASKNAME like '%s%%'" % options.runtaskname,
                                        "and UPDATED >= ", DbParam(earliestUpdateTime),
                                        "and COOLTAGS like '%%%s%%'" % options.beamspottag,
                                        "order by RUNNR desc"]):
        dsname = t['DSNAME']
        runnr = t['RUNNR']
        taskName = t['TASKNAME']
        datatag = taskName.split('.')[-1].split('_')[0]
        bcidTaskName = 'BCID.%s.%s' % (taskName, datatag)

        try:
            m = next(taskman.taskIterDict('*', ['where RUNNR =', DbParam(runnr), 'and DSNAME =', DbParam(dsname), 'and TASKNAME =', DbParam(bcidTaskName), 'order by UPDATED desc']))
            print ('    %-10s  %s' % (runnr, bcidTaskName))
        except:
            print ('  * %-10s  %s' % (runnr, '--- no BCID task found ---'))
            taskList.append(t)

    if not taskList:
        print ('\nNo jobs need to be run.\n')
        sys.exit(0)

    if not options.batch:
        a = input('\nARE YOU SURE [n] ? ')
        if a != 'y':
            sys.exit('ERROR: Running of BCID tasks aborted by user')
        print ()

    oracle = getT0DbConnection()
    for t in taskList:
        dsname = t['DSNAME']
        runnr = t['RUNNR']
        ptag = dsname.split('.')[0]
        stream = dsname.split('.')[2]
        taskName = t['TASKNAME']
        fulldatatag = taskName.split('.')[-1].split('_')[0]
        datatag = taskName.split('.')[-1].split('_')[0]
        bcidTaskName = 'BCID.%s.%s' % (taskName, datatag)

        filter = 'AOD'
        if any(t[0] == 'm' for t in fulldatatag.split('_')):
            t0dsname = '%s.merge.%s.%s%%' % (dsname, filter, datatag)
        else:
            t0dsname = '%s.recon.%s.%s%%' % (dsname, filter, datatag)

        c = getJobConfig('%s/%s' % (dsname, taskName))
        if 'ESD' in c['inputfiles'][0]:
            filter = 'ESD'
            t0dsname = '%s.recon.ESD.%s' % (dsname, datatag)

        print ('\nRunning BCID job for run %s:' % runnr)

        print ('... Querying T0 database for replication of %s' % t0dsname)
        cur = oracle.cursor()
        cur.execute("select DATASETNAME,PSTATES from DATASET where DATASETNAME like '%s' and PSTATES like '%%replicate:done%%'" % t0dsname)
        r = cur.fetchall()
        if not r:
            print ('    WARNING: input data not yet replicated - please retry later')
            continue

        print ('... Submitting BCID task')
        cmd = 'beamspotman.py -p %s -s %s -f \'.*\\.%s\\..*\' --bcidtaskname %s runBCID %i %s' % (ptag, stream, filter, 'BCID.' + taskName, int(runnr), datatag)
        print (cmd)
        status = os.system(cmd) >> 8
        if status:
            print ('\nERROR: Job submission returned error - exit code %i\n' % status)

    sys.exit(0)
if cmd == 'mctag' and len(args) < 12:
    from InDetBeamSpotExample.COOLUtils import *

    if not options.beamspottag:
        sys.exit('ERROR: No beam spot tag specified')

    dbfile = options.beamspottag + '.db'
    folderHandle = openBeamSpotDbFile(dbfile, dbName='OFLP200', forceNew=True)

    runMin = options.runMin if options.runMin is not None else 0
    runMax = options.runMax if options.runMax is not None else (1 << 31) - 1

    writeBeamSpotEntry(folderHandle, tag=options.beamspottag,
                       runMin=runMin, runMax=runMax,
                       status=int(args[1]),
                       posX=float(args[2]), posY=float(args[3]), posZ=float(args[4]),
                       sigmaX=float(args[5]), sigmaY=float(args[6]), sigmaZ=float(args[7]),
                       tiltX=float(args[8]) if len(args) > 8 else 0.,
                       tiltY=float(args[9]) if len(args) > 9 else 0.,
                       sigmaXY=float(args[10]) if len(args) > 10 else 0.,
                       posXErr=0., posYErr=0., posZErr=0.,
                       sigmaXErr=0., sigmaYErr=0., sigmaZErr=0.,
                       tiltXErr=0., tiltYErr=0., sigmaXYErr=0.)

    print ('* MC beamspot tag written to db=OFLP200, tag=%s in %s ' % (options.beamspottag, dbfile))
    print ('  - AtlCoolConsole.py "sqlite://;schema=' + dbfile + ';dbname=OFLP200"')
    print ('* To upload to oracle use:')
    print ('  - beamspotman.py --srctag %s -t %s --srcdbname OFLP200 --destdbname OFLP200 upload %s' % (options.beamspottag, options.beamspottag, dbfile))
    sys.exit(0)
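# Illustrative example (tag name and numbers are made up): create an MC tag at
# the nominal origin with 10 um transverse and 50 mm longitudinal width for all
# runs:
#   beamspotman.py --rl 0 --ru 999999 -t MyMCBeamSpotTag mctag 59 0. 0. 0. 0.01 0.01 50.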
if cmd == 'maketag' and len(args) < 12:
    from InDetBeamSpotExample.COOLUtils import *

    if not options.beamspottag:
        sys.exit('ERROR: No beam spot tag specified')

    dbfile = options.beamspottag + '.db'
    dbName = options.destdbname
    folderHandle = openBeamSpotDbFile(dbfile, dbName=options.destdbname, forceNew=True)

    runMin = options.runMin if options.runMin is not None else 0
    runMax = options.runMax if options.runMax is not None else (1 << 31) - 1

    writeBeamSpotEntry(folderHandle, tag=options.beamspottag,
                       runMin=runMin, runMax=runMax,
                       status=int(args[1]),
                       posX=float(args[2]), posY=float(args[3]), posZ=float(args[4]),
                       sigmaX=float(args[5]), sigmaY=float(args[6]), sigmaZ=float(args[7]),
                       tiltX=float(args[8]) if len(args) > 8 else 0.,
                       tiltY=float(args[9]) if len(args) > 9 else 0.,
                       sigmaXY=float(args[10]) if len(args) > 10 else 0.,
                       posXErr=0., posYErr=0., posZErr=0.,
                       sigmaXErr=0., sigmaYErr=0., sigmaZErr=0.,
                       tiltXErr=0., tiltYErr=0., sigmaXYErr=0.)

    print ('* Beamspot tag written to db=%s, tag=%s in %s ' % (dbName, options.beamspottag, dbfile))
    print ('  - AtlCoolConsole.py "sqlite://;schema=' + dbfile + ';dbname=%s"' % (dbName))
    print ('* To upload to oracle use:')
    print ('  - beamspotman.py --srctag %s -t %s --srcdbname %s --destdbname %s upload %s' % (options.beamspottag, options.beamspottag, dbName, dbName, dbfile))
    print ('  - /afs/cern.ch/user/a/atlcond/utils22/AtlCoolMerge.py --nomail %s %s ATLAS_COOLWRITE ATLAS_COOLOFL_INDET_W <passwd>' % (dbfile, dbName))
    sys.exit(0)
print ('ERROR: Illegal command or number of arguments ({})'.format(' '.join(args)))
sys.exit(1)