ATLAS Offline Software
CalibDataClass.py
1 #!/bin/env python
2 
3 from __future__ import print_function
4 
5 import cx_Oracle
6 import sys
7 import traceback
8 import time
9 import datetime
10 import calendar
11 import re
12 import os
13 import subprocess
14 
15 # database configuration info
16 import CalibDbInfo
17 
18 # if ATLAS env is initialized import from there but fallback to copy in working dir if available
19 try:
20  from MuonCalibIdentifier.MuonFixedIdUnpack import * # assumed release-level import path for MuonFixedIdUnpack
21 except ImportError:
22  from MuonFixedIdUnpack import *
23 
24 class CalibDataError(Exception):
25  def __init__(self, caller, exc=None):
26  if caller.debug == True:
27  caller.ptrace()
28  if isinstance(exc,cx_Oracle.Error):
29  error, = exc.args
30  if isinstance(error,str):
31  self.args += error,
32  else:
33  self.args += error.message,
34  return
35  if isinstance(exc,str):
36  self.args += exc,
37  return
38  if isinstance(exc,Exception):
39  self.args = exc.args
40  return
41 
42 class HeadInsertError(CalibDataError): pass
43 class RTInsertError(CalibDataError): pass
44 class T0InsertError(CalibDataError): pass
45 class ADCInsertError(CalibDataError): pass
46 class MapInsertError(CalibDataError): pass
47 class DeleteError(CalibDataError): pass
48 class QueryError(CalibDataError): pass
49 class UpdateError(CalibDataError): pass
50 class DataUniqueError(CalibDataError): pass
51 class DataCopyError(CalibDataError): pass
52 
53 class CalibData:
54  service_re = re.compile('\(SERVICE_NAME\s*\=\s*([A-Za-z.]+)\)+')
55  # unix timestamp
56  unixts = re.compile('[0-9]{10,11}')
57 
58  def __init__(self,head_id=None,impl=None,lr=None,ur=None,lt=None,ut=None,luminosity=None,rootfile=None,statusflag=None):
59 
60  # counter of operations not yet committed
61  self.opcount = 0
62  # committed operations
63  self.optotal = 0
64  # increments when transaction is committed
65  self.transtotal = 0
66  # total time for database ops (seconds)
67  self.dbtime = { 'wtime': 0, 'ptime': 0 }
68  #internal timer used to calculate timing intervals
69  self.timer = { 'wtime':0, 'ptime': 0 }
70  # variable to "lock" timer if engaged
71  self.timelock = False
72 
73  # break operations up into transactions less than maxops?
74  # this really only fully applies to delete and batch insert operations from files (not t0 or rt copying operations).
75  # For a copy operation, if maxops is less than the number of rows inserted into a single table at once then
76  # it will cause operations to be broken up into a single transaction for each insert (so insert into MDT_TUBE, commit, then MDT_TUBE_C, commit, etc)
77  # It's always been broken for the case of batch copies, and there doesn't seem any point to fixing it now since we've done
78  # away with Oracle streams and don't need the per-transaction row limiting enabled - in fact breaking any mass insert/copy up into multiple
79  # transactions is certainly a bad idea which would never have happened except for the issues that large transactions cause with Oracle Streams.
80  self.limit = False
81  self.maxops = 10000
82 
83  self.debug = False
84 
85  if lr == None:
86  self.lowrun = 1
87  else:
88  self.lowrun = lr
89 
90  if ur == None:
91  self.uprun = 999999
92  else:
93  self.uprun = ur
94 
95  if lt == None:
96  lt = "19700101 00:00:00"
97  if ut == None:
98  ut = "20690721 00:37:33"
99 
100  if self.unixts.match(str(lt)) != None:
101  self.lowtime_string = datetime.datetime.fromtimestamp(lt)
102  self.lowtime = lt
103  else:
104  lt = lt.replace("-","")
105  lowtime_converted = time.strptime(lt, "%Y%m%d %H:%M:%S")
106  self.lowtime = calendar.timegm(lowtime_converted)
107  self.lowtime_string = lt
108 
109  if self.unixts.match(str(ut)) != None:
110  self.uptime_string = datetime.datetime.fromtimestamp(ut)
111  self.uptime = ut
112  else:
113  ut = ut.replace("-","")
114  uptime_converted = time.strptime(ut, "%Y%m%d %H:%M:%S")
115  self.uptime = calendar.timegm(uptime_converted)
116  self.uptime_string = ut
117 
118  if luminosity == None:
119  self.luminosity = 1.0
120  else:
121  self.luminosity = luminosity
122 
123  if rootfile == None:
124  self.rootfile = "calib_fit.root"
125  else:
126  self.rootfile = rootfile
127 
128  if statusflag == None:
129  self.statusflag = "NULL"
130  else:
131  self.statusflag = statusflag
132 
133  self.implementation = impl
134 
135  self.type = None
136  self.calibflag = 0
137  self.insert_time = None
138 
139  # static rt "dummy" vars
140  self.histogram = ""
141  self.n_segs = 10000
142  self.aver_angle = 0
143  self.delta_aver_angle = 0
144  self.aver_spread = 9.99989986
145  self.delta_aver_spread = 9.99989986
146  self.convergence = 0
147  self.b_aver = 0
148  self.t_aver = 0
149  self.hv = 0
150  self.curr = 0
151  self.gas = 0
152  self.validflag = 3
153  self.bins = 100
154 
155  # t0 related
156  # this can't be global, i think
157  self.t0_tube_grouping = None
158  self.tzeros = []
159 
160  id,db = self.parse_id_arg(head_id)
161 
162  self.head_id = id
163 
164  if db != None:
165  self.setdb(db)
166  else:
167  self.setdb(CalibDbInfo.calibdb)
168 
169  self.schema_active = None
170  self.schema_archived = None
171  self.schema_archive_connection_string = None
172  self.mconn = None
173  self.dconn = None
174  self.dcursor = None
175  self.mcursor = None
176  self.mschema = None
177  self.dschema = None
178  self.writer_account = None
179  self.dbuser = None
180  self.proxyuser = True
181  self.linkid = None
182 
183  def __del__(self):
184  self.drop_dblink()
185  if self.dconn != None:
186  if self.debug == True:
187  print("Closing calibration database connection")
188  self.dconn.close()
189  if self.mconn != None and self.replica == False:
190  if self.debug == True:
191  print("Closing metadata database connection")
192  try:
193  self.mconn.close()
194  except cx_Oracle.InterfaceError as exc:
195  # IF there is only one consolidated schema then dconn and mconn point to the same connection
196  # ignore a 'not connected' error from trying to close that connection twice
197  if exc == 'not connected':
198  pass
199 
200 
201  def ptrace(self):
202  if self.debug == True:
203  traceback.print_exc()
204 
205  @staticmethod
206  def format_dblist():
207 
208  dblist = '%-10s %-28s %-28s \n' % ("ID","SERVICE","SCHEMA")
209  dblist += '---------------------------------------------------------------\n'
210  for ldb in CalibDbInfo.databases.keys():
211  if ldb == CalibDbInfo.calibdb:
212  isdefault = "(Default)"
213  else:
214  isdefault = ""
215  if ldb not in CalibDbInfo.db.keys():
216  schema = "undefined"
217  else:
218  schema = CalibDbInfo.db[ldb]
219  dblist += "%-10s %-28s %-28s %s \n" % (ldb,CalibData.service_re.search(CalibDbInfo.databases[ldb]).group(1),schema,isdefault)
220  return dblist
221 
222  def setdb(self,dbid):
223  if dbid in CalibDbInfo.databases:
224  self.database = dbid
225  self.sitename = CalibDbInfo.sitename[self.database]
226  self.tnsname = CalibDbInfo.databases[self.database]
227  self.replica = CalibDbInfo.replica[self.database]
228  self.service = self.service_re.search(self.tnsname).group(1)
229 
230  if self.service == None:
231  raise Exception("SERVICE_NAME not found in databases[%s]" % dbid)
232  else:
233  raise Exception("Tried to set CalibData object to unknown database %s" % (dbid))
234 
235  def dbgout(self, output):
236  if self.debug:
237  print (output + '\n')
238 
239  # returns the name of the first attribute in checkvars that is unset (None), or False if all are set
240  def missing_attributes(self,checkvars):
241  required = dict( (name,getattr(self,name)) for name in checkvars)
242  for varname,var in required.items():
243  if var == None:
244  return varname
245  return False
246 
247  def userstrings(self,access='read',schema='meta'):
248  var = { 'write': 'dbw', 'read': 'dbr' }
249  rval = { }
250  attr = var[access]
251  passattr = var[access] + '_password'
252  proxyattr = var[access] + '_proxy'
253  obj = getattr(CalibDbInfo,attr)
254  passobj = getattr(CalibDbInfo,passattr)
255  proxyobj = getattr(CalibDbInfo,proxyattr)
256  rval['password'] = passobj[self.database]
257 
258  if schema == 'meta' or (schema == 'data' and access == 'read'):
259  rval['user'] = obj[self.database]
260  try:
261  rval['user'] += '[' + proxyobj[self.database] + ']'
262  except KeyError:
263  pass
264  return rval
265 
266  if schema == 'data':
267  if self.dschema == None:
268  self.set_active_db()
269  try:
270  # see if there is an _proxy array with index for this database. If so
271  # continue on to set the user using queried writer_account under proxy user
272  # if exception, use queried writer account as-is.
273  # we are assuming that if the meta table writer used a proxy account then so does this one (info is not discoverable from the database)
274  test = proxyobj[self.database]
275  rval['user'] = obj[self.database] + '[' + self.writer_account + ']'
276  except KeyError:
277  rval['user'] = self.writer_account
278  pass
279 
280  return rval
281 
282  # access can be 'read' or 'write'
283  # there is a potential bug here - if a read connection is requested after a write connection
284  # we will close the write connection with uncommitted data to open the read connection
285  # It's ok as long as we are careful not to do anything in that order inside the class (users of class shouldn't be able to cause it)
286  def connect(self,access='read',schema='meta'):
287  try:
288  userinfo = self.userstrings(access,schema)
289  userstring = userinfo['user']
290  userpass = userinfo['password']
291  cstring = userstring + "/" + userpass + "@" + self.tnsname
292 
293  dbname = CalibDbInfo.db[self.database] if schema == 'meta' else 'queried data schema'
294 
295  dbgstring = '%s %s connection to %s requested - doing nothing and keeping existing open' % (access,schema,dbname)
296  dbgstring2 = 'New connection required - closing %s schema database connection' % schema
297  dbgstring3 = 'Opening connection with connection information: %s' % cstring
298 
299  if schema == 'meta':
300  if self.mconn != None:
301  if self.metauser == userstring:
302  self.dbgout(dbgstring)
303  return
304  self.dbgout(dbgstring2)
305  self.mconn.close()
306 
307  self.dbgout(dbgstring3)
308  self.mconn = cx_Oracle.connect(cstring)
309  self.mcursor = self.mconn.cursor()
310  self.metauser = userstring
311  self.mschema = CalibDbInfo.db[self.database]
312  self.set_active_db()
313 
314  elif schema == 'data':
315  # it is possible the same user is used for writing both schemas - don't open a new connection in that case
316  if self.metauser == userstring and self.mconn != None:
317  self.dconn = self.mconn
318  self.dcursor = self.dconn.cursor()
319  self.datauser = userstring
320 
321  if self.dconn != None:
322  if self.datauser == userstring:
323  self.dbgout(dbgstring)
324  return
325  self.dbgout(dbgstring2)
326  self.dconn.close()
327 
328  self.dbgout(dbgstring3)
329  self.dconn = cx_Oracle.connect(cstring)
330  self.dcursor = self.dconn.cursor()
331  self.datauser = userstring
332 
333  except Exception as exc:
334  output = "There was an error connecting to the database "
335  if self.mschema != None and schema == "meta":
336  output += "Schema: %s" % self.mschema
337  if self.dschema != None and schema == "data":
338  output += "Schema: %s" % self.dschema
339  output += "\n"
340  if isinstance(exc,cx_Oracle.Error):
341  error, = exc.args
342  output += error.message
343  elif isinstance(exc,KeyError):
344  output += "There is configuration missing for database identifier '%s' - check CalibDbInfo.py" % self.database
345  else:
346  output += exc.args[0]
347  raise CalibDataError(self,output)
348 
349  # only necessary to call this when creating new head_id entries, existing head_id will populate active_schema and writer_account when we retrieve it
350  # note that when retrieving an existing head_id, active_schema in this object doesn't necessarily == database active_schema
351  def set_active_db(self):
352  schema_sql = self.mschema + '.MDT_DATA_SCHEMA'
353  sql = "select WRITER_ACCOUNT,SCHEMA_NAME from %s where ACTIVE = 1 " % (schema_sql)
354 
355  try:
356  self.dbgout("SQL in set_active_db: %s" % sql)
357  self.mcursor.execute(sql)
358  except Exception as exc:
359  raise QueryError(self,exc)
360 
361  result = self.mcursor.fetchone()
362 
363  if result:
364  self.active_schema = result[1]
365  self.writer_account = result[0]
366 
367  def timestart(self):
368  self.timelock = True
369  self.timer['wtime'] = time.time()
370  self.timer['ptime'] = time.clock()
371 
372  def timestop(self):
373  if self.timelock == False:
374  raise Exception("Called timestop without calling timestart")
375  self.timelock = False
376  welap = time.time() - self.timer['wtime']
377  pelap = time.clock() - self.timer['ptime']
378  self.dbtime['wtime'] += welap
379  self.dbtime['ptime'] += pelap
380 
381  # what I think I really want to do here is write an "exec" function that timestarts/timestops for all operations and takes as argument what type of op to exec
382  # so if necessary it can adjust SQL or other to stay within transaction limits. What I have is good enough for now, just a bit redundant.
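 # a sketch of what such a wrapper could look like (illustrative only, not in the source),
 # reusing only members that already exist in this class:
 # def exec_timed(self, sql, ctype="data"):
 #  self.timestart()
 #  cursor = self.dcursor if ctype == "data" else self.mcursor
 #  cursor.execute(sql)
 #  if cursor.rowcount > 0:
 #   self.opcount += cursor.rowcount
 #  self.commit()
 #  self.timestop()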
383 
384  def exec_delete(self,sql,ctype="data"):
385  loop = True
386  rowcount = 0
387  self.timestart()
388  if self.limit == True:
389  sql_del_limit = " AND ROWNUM < %s" % (self.maxops + 1)
390  else:
391  sql_del_limit = ""
392  while loop:
393  esql = sql + sql_del_limit
394  self.dbgout("SQL executed in exec_delete: %s" % esql)
395  if ctype == "data":
396  self.dcursor.execute(esql)
397  if self.dcursor.rowcount > 0:
398  self.opcount += self.dcursor.rowcount
399  rowcount = self.dcursor.rowcount
400  elif ctype == "meta":
401  self.mcursor.execute(esql)
402  if self.mcursor.rowcount > 0:
403  self.opcount += self.mcursor.rowcount
404  rowcount = self.mcursor.rowcount
405  if rowcount > 0:
406  rowcount = 0
407  self.commit()
408  loop = True
409  else:
410  loop = False
411  self.timestop()
412 
413  def exec_insert(self,sql,ctype="data"):
414  self.timestart()
415  if ctype == "data":
416  self.dcursor.execute(sql)
417  if self.dcursor.rowcount > 0:
418  self.opcount += self.dcursor.rowcount
419  elif ctype == "meta":
420  self.mcursor.execute(sql)
421  if self.mcursor.rowcount > 0:
422  self.opcount += self.mcursor.rowcount
423  # commit only commits if opcount is >= maxops
424  self.commit()
425  self.timestop()
426 
427  def check_opcount(self):
428  if self.opcount >= self.maxops and self.limit == True:
429  if self.debug:
430  confirm = "y"
431  print ("Ready to commit %s operations" % self.opcount)
432  confirm = raw_input("Enter 'y' or 'yes' if you wish to continue with operation: ")
433  if confirm not in ("y","yes","Y"):
434  raise Exception("User cancelled operation")
435  return True
436  else:
437  return False
438 
439  def commit(self,force=False):
440  did_commit = False
441 
442  try:
443  if self.mconn != None and self.replica == False:
444  if self.check_opcount() or force:
445  self.mconn.commit()
446  did_commit = True
447  self.mcursor = self.mconn.cursor()
448  if self.dconn != None:
449  if self.check_opcount() or force:
450  self.dconn.commit()
451  did_commit = True
452  self.dcursor = self.dconn.cursor()
453  if did_commit:
454  self.optotal += self.opcount
455  self.opcount = 0
456  self.transtotal += 1
457 
458  except Exception as exc:
459  raise CalibDataError(self,exc)
460 
461  def rollback(self):
462  try:
463  if self.mconn != None:
464  if self.mcursor.rowcount > 0:
465  self.opcount += self.mcursor.rowcount
466  self.mcursor.close()
467  self.mconn.rollback()
468  self.mcursor = self.mconn.cursor()
469  if self.dconn != None:
470  if self.dcursor.rowcount > 0:
471  self.opcount += self.dcursor.rowcount
472  try:
473  self.dcursor.close()
474  except cx_Oracle.InterfaceError as exc:
475  # sometimes the cursor isn't open if we didn't do anything - not a problem I need to know about
476  if exc == 'not open':
477  pass
478  self.dconn.rollback()
479  self.dcursor = self.dconn.cursor()
480  except Exception as exc:
481  raise CalibDataError(self,exc)
482 
483  # split (if necessary) id argument like headid@database into id and database. returns "None" if no database or None id provided
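 # e.g. (illustrative) parse_id_arg("1234@CERN") -> ("1234", "CERN"); parse_id_arg(None) -> (None, None)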
484  def parse_id_arg(self,id):
485  db = None
486 
487  if id == None:
488  return id,db
489 
490  list = str(id).split('@',1)
491 
492  id = list[0]
493 
494  if len(list) > 1:
495  db = list[1]
496 
497  return id,db
498 
499  # service is service attribute from source object used in copy operation
500  def setup_dblink(self,srcdb,service):
501 
502  if srcdb != self.database:
503  # The need for @loopback comes up in testing when we copy between databases on the same server. Will not be allowed without specifying @loopback.
504  if service == self.service:
505  lb = '@loopback'
506  else:
507  lb = ''
508 
509  linkid = service + lb
510 
511  dblink_sql = "CREATE DATABASE LINK %s CONNECT TO %s identified by %s using '%s'" % (linkid, CalibDbInfo.dbr[srcdb],CalibDbInfo.dbr_password[srcdb],CalibDbInfo.databases[srcdb])
512  check_sql = "SELECT DB_LINK from USER_DB_LINKS where lower(DB_LINK) like '%s%s'" % (linkid,'%');
513 
514  if self.linkid == None:
515  try:
516  self.connect("write","meta")
517  self.dbgout ("SQL in setup_dblink: %s" % check_sql)
518  self.mcursor.execute(check_sql)
519  self.linkid = self.mcursor.fetchone()
520  if self.linkid != None:
521  self.drop_dblink()
522  self.dbgout("SQL in setup_dblink: %s" % dblink_sql)
523  self.mcursor.execute(dblink_sql)
524  self.linkid = linkid
525  except Exception as exc:
526  raise CalibDataError(self,exc)
527 
528  def drop_dblink(self):
529  if self.linkid != None:
530  drop_sql = "DROP DATABASE LINK %s" % self.linkid
531  self.dbgout("Deleting database link")
532  self.connect("write","meta")
533  try:
534  self.dbgout("SQL in drop_dblink: %s" % drop_sql)
535  self.mcursor.execute(drop_sql)
536  except Exception as exc:
537  raise CalibDataError(self,exc)
538 
539  def format_headinfo(self):
540  info = "Implementation name: %s \n" % self.implementation
541  info += "Lower Time IOV: %s, %s epoch seconds \n" % (self.lowtime_string, self.lowtime)
542  info += "Upper Time IOV: %s, %s epoch seconds \n" % (self.uptime_string, self.uptime)
543  info += "Lower Run IOV: %s \n" % self.lowrun
544  info += "Upper Run IOV: %s \n" % self.uprun
545  info += "Luminosity: %s \n" % self.luminosity
546  info += "Rootfile: %s \n" % self.rootfile
547  info += "Sitename: %s \n" % self.sitename
548  info += "Insert Time: %s \n" % self.insert_time
549  info += "Status Flag: %s \n" % self.statusflag
550  return info
551 
552  # this is meant to be used to extract some valid tube/chamber identifier from any given string containing one. Intended use is for uploading
553  # from batches of files where each filename identifies a tube
554  @staticmethod
555  def extract_tubestring(fullstring):
556  mid = MuonFixedIdUnpack()
557  hardware_tubestring = mid.online_re.search(fullstring)
558  software_tubestring = mid.offline_re.search(fullstring)
559  numeric_tubestring = mid.numeric_re.search(fullstring)
560 
561  if (hardware_tubestring):
562  return hardware_tubestring.group()
563  if (software_tubestring):
564  return software_tubestring.group()
565  if (numeric_tubestring):
566  # make sure it is not actually a date
567  datematch = re.compile('(19|20)\d\d(0[1-9]|1[012])(0[1-9]|[12][0-9]|3[01])')
568  if not datematch.search(fullstring):
569  return numeric_tubestring.group()
570 
571  return False
572 
573  @staticmethod
574  def convert_tube(tube):
575  exepath = sys.path[0] + "/muonfixedid"
576  retval = subprocess.Popen([exepath,tube], stdout=subprocess.PIPE).stdout.read()
577  if "ERROR" in retval:
578  print (retval)
579  raise Exception
580  else:
581  return retval
582 
583  @staticmethod
584  def get_tube_id(tube):
585  mid=MuonFixedIdUnpack(tube)
586  return mid.identifier
587 
588  @staticmethod
589  def get_tube_string(tube):
590  try:
591  mid = MuonFixedIdUnpack(tube)
592  string = "%s_%s_%s" % (mid.stationNameString(),mid.stationPhi(),mid.stationEta())
593  return string
594  except ValueError:
595  # will throw exception if format doesn't match regex
596  MuonFixedIdUnpack(tube)
597  return tube
598 
599  def get_head_info(self):
600  self.connect("read","meta")
601  sql = "select head.head_id, head.implementation, head.data_schema, sh.SCHEMA_NAME, head.lowrun,head.uprun,head.lowtime,head.uptime, head.luminosity, head.rootfile, head.site_name, head.insert_time, head.status_flag, sh.WRITER_ACCOUNT, sh.ACTIVE, sh.ARCHIVED, sh.ARCHIVE_CONNECTION_STRING from %s.MDT_HEAD head, %s.MDT_DATA_SCHEMA sh where head.DATA_SCHEMA = sh.SCHEMA_NAME AND " % (self.mschema,self.mschema)
602 
603  sql += "head.HEAD_ID = %s " % self.head_id
604 
605  sql += "ORDER BY head.head_id DESC"
606 
607  self.dbgout("SQL from get_head_info: %s" % sql)
608 
609  try:
610  self.mcursor.execute(sql)
611  except Exception as exc:
612  raise QueryError(self,exc)
613 
614  result = self.mcursor.fetchone()
615  if result:
616  self.head_id = result[0]
617  self.implementation = result[1]
618  self.dschema = result[2]
619  self.calibdbwriter = result[3]
620  self.lowrun = result[4]
621  self.uprun = result[5]
622  self.lowtime = result[6]
623  self.uptime = result[7]
624  self.luminosity = result[8]
625  self.rootfile = result[9]
626  self.sitename = result[10]
627  # the result will be a python datetime object which I am converting to the format oracle would expect if we use this value in an INSERT statement
628  self.insert_time = self.convert_datetime_oracle(result[11])
629  self.statusflag = result[12]
630  # this value is used in connection strings
631  self.calibdbwriter = result[13]
632  # this value may be altered and used to write to MDT_DATA_SCHEMA table
633  self.writer_account = result[13]
634  self.schema_active = result[14]
635  self.schema_archived = result[15]
635  if result[16] == None:
636  # store the string "NULL" so later writes can insert it unquoted (same convention as statusflag below)
637  self.schema_archive_connection_string = "NULL"
638  else:
639  self.schema_archive_connection_string = result[16]
640 
641  if self.statusflag == None:
642  self.statusflag = "NULL"
643  else:
644  raise QueryError(self,"No results")
645 
646  self.lowtime_string = datetime.datetime.fromtimestamp(self.lowtime)
647  self.uptime_string = datetime.datetime.fromtimestamp(self.uptime)
648 
649  def convert_datetime_oracle(self,dtobj):
650  #current_time = datetime.datetime.utcnow().strftime("%d-%b-%y %I.%M.%S.%f %p %z")
651  if isinstance(dtobj,datetime.datetime):
652  return dtobj.strftime('%d-%b-%y %I.%M.%S.%f %p %z').upper()
653  else:
654  raise Exception("Not a datetime object")
655 
656  def get_t0(self,tube_id=None,chamber_id=None):
657  # **** I have no idea if this really works anymore
658  self.connect("read","data")
659  sql = "SELECT T.chamber, T.tube_id, T.P4, T.ADC_1, T.validflag, T.nhits_above_adc_cut, V.algo_flag,V.chisquare_1,V.P4_err,V.P5,V.P5_err,V.P0,C.P0_ERR,V.P6,V.p6_err, V.tube_grouping from"+self.dschema + ".MDT_TUBE T, " + self.dschema + ".MDT_TUBE_V V," + self.dschema + ".MDT_TUBE_C C where T.head_id = :hid AND C.head_id = :hid AND V.head_id = :hid"
660  if chamber_id != None:
661  sql += " AND t.chamber = :cid AND C.tube_id IN (SELECT tube_id from %s.MDT_TUBE T WHERE T.chamber = :cid) AND V.tube_id IN (SELECT tube_id from %s.MDT_TUBE T WHERE T.chamber = :cid)"
662  if tube_id != None:
663  sql += " AND T.tube_id = :tid AND C.tube_id = :tid AND V.tube_id = :tid"
664 
665  self.dbgout("SQL in get_tzero: %s" % sql)
666 
667  try:
668  self.dcursor.execute(sql, dict([('hid',self.head_id)] + [(k,v) for k,v in (('cid',chamber_id),('tid',tube_id)) if v != None]))
669  except Exception as exc:
670  raise QueryError(self,exc)
671 
672  rowcount = 0
673  for r in self.dcursor.fetchall():
674  self.tzeros.append(r)
675  rowcount+=1
676 
677  def get_rt(self,chamber_id=None):
678  self.connect("read","data")
679  sql = "SELECT HEAD_ID, REGION_ID,CALIBFLAG,LOWRUN,UPRUN,HISTOGRAM,N_SEGS,AVER_ANGLE,DELTA_AVER_ANGLE,AVER_SPREAD,DELTA_AVER_SPREAD,CONVERGENCE,B_AVER,T_AVER,HV,CURR,GAS,VALIDFLAG,ALGO_FLAG,BINSFROM %s.MDT_RT" % (self.dschema )
680  if chamber_id != None:
681  sql += " AND region_id = :rid"
682  try:
683  self.dcursor.execute(sql, {} if chamber_id == None else {'rid':chamber_id})
684  except Exception as exc:
685  raise QueryError(self,exc)
686  # self.rt is not initialised in __init__, so create it here before filling it
687  self.rt = []
688  for r in self.dcursor.fetchall():
689  self.rt.append(r)
690 
691  def check_copy_args(self,sourceobj,fn):
692  if not isinstance(sourceobj,CalibData):
693  raise Exception("Called %s without providing copy source CalibData object" % fn)
694  if sourceobj.mschema == None:
695  raise Exception("CalibData object passed to %s not initialized with get_head_info() method" % fn)
696 
697  # used either in copy to replica or to write a new schema entry as independent operation
698  def write_data_schema(self):
699  if self.dschema == None or self.writer_account == None or self.schema_active == None or self.schema_archived == None or self.schema_archive_connection_string == None:
700  raise Exception("Required members in CalibData object not set, cannot check or write data schema.\nself.dschema: %s, self.writer_account: %s, self.schema_active: %s, self.schema_archived: %s self.schema_archive_connection_string: %s" % (self.dschema,self.writer_account,self.schema_active,self.schema_archived,self.schema_archive_connection_string))
701 
702  sql_exists = "SELECT SCHEMA_NAME from %s.MDT_DATA_SCHEMA WHERE SCHEMA_NAME = '%s'" % (self.mschema, self.dschema)
703 
704  sql_insert = "INSERT INTO %s.MDT_DATA_SCHEMA VALUES ('%s','%s',%s,%s,%s)" % (self.mschema,self.dschema,self.writer_account,self.schema_active,self.schema_archived,self.schema_archive_connection_string)
705 
706  self.dbgout("SQL from write_data_schema: %s" % sql_exists)
707 
708  try:
709  self.mcursor.execute(sql_exists)
710  self.mcursor.fetchall()
711  if self.mcursor.rowcount > 0:
712  if self.replica == False:
713  raise Exception(self,"Tried to write new MDT_DATA SCHEMA %s but SCHEMA_NAME exists" % self.dschema)
714  else:
715  return
716  except Exception as e:
717  raise QueryError(self,e)
718 
719 
720  self.dbgout("SQL from write_data_schema: %s" % sql_insert)
721 
722  try:
723  self.mcursor.execute(sql_insert)
724  except Exception as exc:
725  raise HeadInsertError(self,"Unable to insert into MDT_DATA_SCHEMA: %s" % exc)
726 
727 
728  def write_headid(self):
729 
730  required = dict( (name,getattr(self,name)) for name in [ 'implementation', 'lowrun', 'uprun', 'lowtime', 'uptime', 'luminosity', 'rootfile', 'sitename' ])
731  for varname,var in required.items():
732  if var == None:
733  raise HeadInsertError(self,"Write head ID - %s was not set, cannot create new id" % varname)
734 
735  if self.replica and self.head_id == None:
736  raise HeadInsertError(self,"Cannot insert new meta information into replica database because self.head_id is not set")
737 
738  self.connect("write","meta")
739  newid = self.mcursor.var(cx_Oracle.NUMBER)
740  schema = self.mcursor.var(cx_Oracle.STRING)
741 
742  if self.head_id == None or self.replica:
743  if self.head_id == None:
744  self.head_id = 'NULL'
745  if self.dschema == None:
746  self.dschema = 'NULL'
747  if self.insert_time == None:
748  self.insert_time = 'NULL'
749  sql = "INSERT INTO %s.MDT_HEAD(HEAD_ID,IMPLEMENTATION, LOWRUN, UPRUN, LOWTIME, UPTIME, LUMINOSITY, ROOTFILE, SITE_NAME, DATA_SCHEMA,INSERT_TIME,STATUS_FLAG) VALUES(%s,'%s',%s,%s,%s,%s,%s,'%s','%s'," % (self.mschema, self.head_id, self.implementation, self.lowrun, self.uprun, self.lowtime, self.uptime, self.luminosity, self.rootfile,self.sitename)
750  if self.replica:
751  # if we're actually inserting values copied from the source to replica we'll need to quote these strings
752  sql += "'%s','%s'," % (self.dschema,self.insert_time)
753  else:
754  # non-replica (site) calibdbs have a trigger to set schema and insert time so we insert unquoted NULL on those columns
755  sql += "%s,%s," % (self.dschema,self.insert_time)
756  sql += "%s) RETURNING head_id,data_schema into :newid,:schema" % (self.statusflag)
757 
758  update = False
759  else:
760  sql = "UPDATE %s.MDT_HEAD SET IMPLEMENTATION='%s', LOWRUN=%s, UPRUN=%s, LOWTIME=%s, UPTIME=%s, LUMINOSITY=%s,ROOTFILE='%s',SITE_NAME='%s', INSERT_TIME = '%s', STATUS_FLAG=%s WHERE HEAD_ID=%s" % (self.mschema, self.implementation,self.lowrun,self.uprun,self.lowtime,self.uptime,self.luminosity,self.rootfile,self.sitename,self.insert_time,self.status_flag, self.head_id)
761  update = True
762 
763  self.dbgout("SQL from write_headid: %s" % sql)
764  try:
765  if update:
766  self.exec_insert(sql,"meta")
767  else:
768  # not using exec_insert to avoid passing bind variables (this is the only place it comes up and essentially amounts to counting 1 operation so it probably isn't worth a lot of thought)
769  self.timestart()
770  self.mcursor.execute(sql,{'newid':newid, 'schema':schema})
771  self.timestop()
772  if self.mcursor.rowcount > 0:
773  self.opcount += self.mcursor.rowcount
774  self.head_id = int(newid.getvalue())
775  if self.replica == False:
776  self.dschema = schema.getvalue()
777  except Exception as exc:
778  raise HeadInsertError(self,exc)
779 
780  def copy_head(self,sourceobj=None):
781  self.connect("write","meta")
782  self.check_copy_args(sourceobj,"copy_head")
783  fromid = sourceobj.head_id
784  srcdb_id = sourceobj.database
785  self.setup_dblink(srcdb_id, sourceobj.service)
786 
787  if self.replica:
788  self.head_id = sourceobj.head_id
789  self.dschema = CalibDbInfo.schema[self.database]
790  self.insert_time = sourceobj.insert_time
791 
792  self.implementation = sourceobj.implementation
793  self.lowrun = sourceobj.lowrun
794  self.uprun = sourceobj.uprun
795  self.lowtime = sourceobj.lowtime
796  self.uptime = sourceobj.uptime
797  self.luminosity = sourceobj.luminosity
798  self.rootfile = sourceobj.rootfile
799  self.sitename = sourceobj.sitename
800  self.statusflag = sourceobj.statusflag
801 
802  self.schema_active = sourceobj.schema_active
803  self.schema_archived = sourceobj.schema_archived
804  self.schema_archive_connection_string = sourceobj.schema_archive_connection_string
805 
806  #if not self.replica:
807  # self.writer_account = sourceobj.writer_account
808 
809  if self.replica:
810  self.write_data_schema()
811 
812  self.write_headid()
813 
814  if self.replica:
815  if int(self.head_id) != int(sourceobj.head_id):
816  raise HeadInsertError(self,"Destination is configured as 'replica' but got new head_id %s from insert when expecting id %s - cannot replicate head_id from source. Check your CalibDbInfo.py configuration." % (self.head_id, sourceobj.head_id))
817 
818  def copy_rt(self,sourceobj,chamber=None,ignore_missing=False):
819  self.check_copy_args(sourceobj,"copy_rt")
820 
821  fromid = sourceobj.head_id
822  srcdb_id = sourceobj.database
823  self.setup_dblink(srcdb_id,sourceobj.service)
824 
825  self.connect("write","data")
826 
827  if self.head_id == None and self.linkid == None:
828  raise Exception("Cannot copy RT in same database without setting head_id for CalibData object")
829  elif self.replica == False or self.linkid == None:
830  id_select = self.head_id
831  else:
832  id_select = "head_id"
833 
834  schema_sql = "%s.MDT_RT" % sourceobj.dschema
835  schema_sql_map = schema_sql + "_MAP"
836 
837  if self.linkid != None:
838  schema_sql += "@%s" % self.linkid
839  schema_sql_map += "@%s" % self.linkid
840 
841  sql_rt = "INSERT INTO %s.MDT_RT (HEAD_ID, REGION_ID,CALIBFLAG,LOWRUN,UPRUN,HISTOGRAM,N_SEGS,AVER_ANGLE,DELTA_AVER_ANGLE,AVER_SPREAD,DELTA_AVER_SPREAD,CONVERGENCE,B_AVER,T_AVER,HV,CURR,GAS,VALIDFLAG,ALGO_FLAG,BINS) SELECT %s,REGION_ID,CALIBFLAG,LOWRUN,UPRUN,HISTOGRAM,N_SEGS,AVER_ANGLE,DELTA_AVER_ANGLE,AVER_SPREAD,DELTA_AVER_SPREAD,CONVERGENCE,B_AVER,T_AVER,HV,CURR,GAS,VALIDFLAG,ALGO_FLAG,BINS FROM %s WHERE HEAD_ID = %s" % (self.dschema, id_select, schema_sql, fromid)
842 
843  sql_rt_map = "INSERT INTO %s.MDT_RT_MAP(HEAD_ID, REGION_ID,CALIBFLAG,POINT_NR,S,T,R) SELECT %s,REGION_ID,CALIBFLAG,POINT_NR,S,T,R FROM %s WHERE HEAD_ID = %s" % (self.dschema, id_select, schema_sql_map, fromid)
844 
845  if chamber != None:
846  chamber_id = self.get_tube_id(chamber)
847  chamber_sql = " AND region_id = %s" % chamber_id
848  sql_rt += chamber_sql
849  sql_rt_map += chamber_sql
850 
851  self.dbgout("SQL in copy_rt: %s" % sql_rt_map)
852 
853  try:
854  self.dbgout("SQL in copy_rt: %s" % sql_rt)
855  self.exec_insert(sql_rt)
856  self.dbgout("SQL in copy_rt: %s" % sql_rt_map)
857  self.exec_insert(sql_rt_map)
858  if self.dcursor.rowcount == 0 and ignore_missing == False:
859  raise DataCopyError(self,"There is no RT data in the source head id")
860  except cx_Oracle.IntegrityError as exc:
861  raise DataUniqueError(self,exc)
862  except HeadInsertError:
863  raise
864  except Exception as exc:
865  raise DataCopyError(self,exc)
866 
867  # copy existing T0 to this object. new head_id must exist (use write_headid if object created with empty headid)
868  def copy_tzero(self,sourceobj=None,chamber=None,tube=None,ignore_missing=False):
869 
870  self.check_copy_args(sourceobj,"copy_tzero_sql")
871 
872  fromid = sourceobj.head_id
873  srcdb_id = sourceobj.database
874  self.setup_dblink(srcdb_id,sourceobj.service)
875 
876  self.connect("write","data")
877 
878  if self.head_id == None and self.linkid == None:
879  raise Exception("Cannot copy tzero in same database without setting head_id for CalibData object")
880  elif self.replica == False or self.linkid == None:
881  id_select = self.head_id
882  else:
883  id_select = "head_id"
884 
885  schema_sql = "%s.MDT_TUBE" % sourceobj.dschema
886  schema_sql_tv = schema_sql + "_V"
887  schema_sql_tc = schema_sql + "_C"
888 
889  if self.linkid != None:
890  schema_sql += "@%s" % self.linkid
891  schema_sql_tv += "@%s" % self.linkid
892  schema_sql_tc += "@%s" % self.linkid
893 
894  mdt_tube_sql = "INSERT INTO %s.MDT_TUBE SELECT tube_id,%s,chamber,calibflag,validflag,nhits,nhits_above_adc_cut,p4,adc_0,adc_1,adc_2,adc_3,insert_time FROM %s WHERE head_id = %s" % (self.dschema,id_select,schema_sql,fromid)
895 
896  mdt_tube_v_sql = "INSERT INTO %s.MDT_TUBE_V (head_id,tube_id,calibflag, chisquare_1, chisquare_2, p0, p1, p2, p3, p5, p6, p7, p8, p9, p4_err, p5_err, p6_err, algo_flag, entries, tube_grouping) SELECT %s, tube_id,calibflag, chisquare_1, chisquare_2, p0, p1, p2, p3, p5, p6, p7, p8, p9, p4_err, p5_err, p6_err, algo_flag, entries, tube_grouping FROM %s WHERE head_id = %s" % (self.dschema,id_select,schema_sql_tv,fromid)
897 
898  mdt_tube_c_sql = "INSERT INTO %s.MDT_TUBE_C (head_id, tube_id,calibflag, p0_err, p1_err, p2_err, p3_err, p7_err, p8_err, p9_err, cov_1, cov_2, cov_3, cov_4, adc_0_err, adc_1_err, adc_2_err, adc_3_err, adc_chisquare, adc_pedestal, adc_pedestal_width) SELECT %s,tube_id,calibflag, p0_err, p1_err, p2_err, p3_err, p7_err, p8_err, p9_err, cov_1, cov_2, cov_3, cov_4, adc_0_err, adc_1_err, adc_2_err, adc_3_err, adc_chisquare, adc_pedestal, adc_pedestal_width FROM %s WHERE head_id = %s" % (self.dschema,id_select,schema_sql_tc,fromid)
899 
900  if chamber != None and tube == None:
901  chamber_id = self.get_tube_id(chamber)
902  chamber_sql = " AND chamber = %s" % chamber_id
903  chamber_tube_sql = " AND tube_id IN (SELECT tube_id FROM %s WHERE chamber=%s)" % (schema_sql,chamber_id)
904  mdt_tube_sql += chamber_sql
905  mdt_tube_v_sql += chamber_tube_sql
906  mdt_tube_c_sql += chamber_tube_sql
907  elif tube != None and chamber == None:
908  tube_id = self.get_tube_id(tube)
909  tube_sql = " AND tube_id = %s" % tube_id
910  mdt_tube_sql += tube_sql
911  mdt_tube_v_sql += tube_sql
912  mdt_tube_c_sql += tube_sql
913  elif tube != None and chamber != None:
914  raise DataCopyError(self,"Cannot operate on both chamber and single tube")
915 
916  self.dbgout("SQL in copy_tzero: %s" % mdt_tube_sql)
917  self.dbgout("SQL in copy_tzero: %s" % mdt_tube_c_sql)
918  self.dbgout("SQL in copy_tzero: %s" % mdt_tube_v_sql)
919 
920  try:
921  #if chamber == None and tube == None:
922  # self.copy_head(sourceobj)
923  self.exec_insert(mdt_tube_sql)
924  if self.dcursor.rowcount == 0 and ignore_missing == False:
925  raise DataCopyError(self,"There is no T0 data in the source head id")
926  self.exec_insert(mdt_tube_c_sql)
927  self.exec_insert(mdt_tube_v_sql)
928  except cx_Oracle.IntegrityError as exc:
929  raise DataUniqueError(self,exc)
930  except HeadInsertError:
931  raise
932  except cx_Oracle.Error as exc:
933  raise DataCopyError(self,exc)
934 
935  def insert_rt_map(self,point_nr, r, t, s):
936  self.connect("write","data")
937  sql_map = "INSERT INTO %s.MDT_RT_MAP(HEAD_ID, REGION_ID,CALIBFLAG,POINT_NR,S,T,R) VALUES (%s,%s,%s,%s,%s,%s,%s)" % ( self.dschema, self.head_id,self.regionid,self.calibflag,point_nr,s,t,r)
938  if self.debug == True:
939  self.dbgout("SQL from insert_rt_map: %s" % sql_map)
940  try:
941  if point_nr == 99 or point_nr == 199:
942  tmax = float(t)
943  if tmax > 800. or tmax < 700.:
944  raise Exception("Tmax value %s: Tmax must be in range 700 to 800 ns" % t)
945  self.exec_insert(sql_map)
946  except cx_Oracle.IntegrityError as exc:
947  raise DataUniqueError(self,exc)
948  except Exception as exc:
949  raise MapInsertError(self,exc)
950 
951  def insert_rt(self,regionid):
952  if self.type == None:
953  raise RTInsertError(self,"Cannot insert RT until you set object 'type' variable")
954 
955  self.connect("write","data")
956  sql_rt = "INSERT INTO %s.MDT_RT (HEAD_ID, REGION_ID,CALIBFLAG,LOWRUN,UPRUN,HISTOGRAM,N_SEGS,AVER_ANGLE,DELTA_AVER_ANGLE,AVER_SPREAD,DELTA_AVER_SPREAD,CONVERGENCE,B_AVER,T_AVER,HV,CURR,GAS,VALIDFLAG,ALGO_FLAG,BINS) VALUES (%s,%s,%s,%s,%s,'%s',%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,'%s',%s)" % (self.dschema,self.head_id,regionid,self.calibflag,self.lowrun,self.uprun,self.histogram,self.n_segs,self.aver_angle,self.delta_aver_angle,self.aver_spread,self.delta_aver_spread,self.convergence,self.b_aver,self.t_aver,self.hv,self.curr,self.gas,self.validflag,self.type,self.bins)
957 
958  self.dbgout("SQL from insert_rt: %s" % sql_rt)
959 
960  try:
961  self.regionid = regionid
962  self.exec_insert(sql_rt)
963  except cx_Oracle.IntegrityError as exc:
964  raise DataUniqueError(self,exc)
965  except Exception as exc:
966  raise RTInsertError(self,exc)
967 
968  def insert_adc(self,tube_id,chamber_id,nhits,adc_0,adc_0_err,adc_1,adc_1_err,adc_2,adc_2_err,adc_3,adc_3_err,adc_chisquare):
969  self.connect("write","data")
970  sql_tube = 'INSERT INTO %s.MDT_TUBE (HEAD_ID,TUBE_ID,CHAMBER,VALIDFLAG,NHITS,NHITS_ABOVE_ADC_CUT,P4,ADC_0,ADC_1,ADC_2,ADC_3) VALUES (%s,%s,%s,0,%s,%s,%s,%s,%s,%s,%s)' % (self.dschema,self.head_id,tube_id,chamber_id,self.validflag,nhits,nhits,adc_0,adc_1,adc_2,adc_3)
971  sql_tube_c = "INSERT INTO %s.MDT_TUBE_C (HEAD_ID,TUBE_ID,ADC_0_ERR,ADC_1_ERR,ADC_2_ERR,ADC_3_ERR,ADC_CHISQUARE) VALUES (%s,%s,%s,%s,%s,%s,%s)" % (self.dschema,self.head_id,tube_id, adc_0_err, adc_1_err, adc_2_err, adc_3_err, adc_chisquare)
972 
973  if self.debug == True:
974  self.dbgout("SQL from insert_adc: %s" % sql_tube)
975  self.dbgout("SQL from insert_adc: %s" % sql_tube_c)
976 
977  try:
978  self.exec_insert(sql_tube)
979  self.exec_insert(sql_tube_c)
980  except cx_Oracle.IntegrityError as exc:
981  raise DataUniqueError(self,exc)
982  except Exception as exc:
983  raise ADCInsertError(self,exc)
984 
985  def insert_t0(self,tube_id,chamber_id, tzero,avg_adc,calibflag,stats,chi2,t0err,tmax,tmax_err,noise,noise_err,slope,slope_err):
986  # ,tzero,avg_adc,calibflag,stats,chi2,t0err,tmax,tmax_err,noise,noise_err,slope,slope_err
987  self.connect("write","data")
988 
989  sql_tube = "INSERT INTO %s.MDT_TUBE (HEAD_ID,TUBE_ID,CALIBFLAG,CHAMBER,P4,ADC_1,VALIDFLAG,NHITS,NHITS_ABOVE_ADC_CUT) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)" % (self.dschema,self.head_id,tube_id,calibflag,chamber_id,tzero,avg_adc,self.validflag,stats,stats)
990 
991  #V.algo_flag,V.chisquare_1,V.P4_err,V.P5,V.P5_err,V.P0,V.P6,V.p6_err, V.tube_grouping)
992  sql_tube_c = "INSERT INTO %s.MDT_TUBE_C (HEAD_ID,TUBE_ID,CALIBFLAG,P0_ERR) VALUES (%s,%s,%s,%s)" % (self.dschema,self.head_id,tube_id,calibflag,noise_err)
993  sql_tube_v = "INSERT INTO %s.MDT_TUBE_V (HEAD_ID,TUBE_ID,CALIBFLAG,CHISQUARE_1,P4_ERR,P5,P5_ERR,P0,P6,P6_ERR,ALGO_FLAG,TUBE_GROUPING) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,'%s','%s')" % (self.dschema,self.head_id,tube_id,calibflag,chi2,t0err,tmax,tmax_err,noise,slope,slope_err,self.type,self.t0_tube_grouping)
994 
995  if self.debug == True:
996  self.dbgout("SQL from insert_t0: %s" % sql_tube)
997  self.dbgout("SQL from insert_t0: %s" % sql_tube_c)
998  self.dbgout("SQL from insert_t0: %s" % sql_tube_v)
999 
1000  try:
1001  self.exec_insert(sql_tube)
1002  self.exec_insert(sql_tube_c)
1003  self.exec_insert(sql_tube_v)
1004  except cx_Oracle.IntegrityError as exc:
1005  raise DataUniqueError(self,exc)
1006  except Exception as exc:
1007  raise T0InsertError(self,exc)
1008 
1009  # returns true on success or else throws DeleteError
1010  # I split the mdt_head and mdt_rt/mdt_rt_map just to be more flexible, no commit happens during this function
1011  def delete_head_id(self):
1012  self.connect("write","meta")
1013  sql = "DELETE FROM %s.MDT_HEAD WHERE HEAD_ID=%s and IMPLEMENTATION='%s'" % ( self.mschema, self.head_id, self.implementation)
1014 
1015  self.dbgout("SQL from delete_head_id: %s" % sql)
1016 
1017  try:
1018  self.exec_delete(sql,"meta")
1019  except Exception as exc:
1020  raise DeleteError(self,exc)
1021 
1022  def delete_rt(self,region=None):
1023  self.connect("write","data")
1024  sql_rt_map = "DELETE FROM %s.MDT_RT_MAP WHERE HEAD_ID=%s" % (self.dschema, self.head_id)
1025  sql_rt = "DELETE FROM %s.MDT_RT WHERE HEAD_ID=%s" % (self.dschema, self.head_id)
1026 
1027  if region != None:
1028  # aka chamber_id
1029  region_id = self.get_tube_id(region)
1030  sql_rt_map += " AND REGION_ID=%s" % region_id
1031  sql_rt += " AND REGION_ID=%s" % region_id
1032  try:
1033  self.exec_delete(sql_rt_map)
1034  self.exec_delete(sql_rt)
1035  except Exception as exc:
1036  raise DeleteError(self,exc)
1037 
1038  def delete_tube(self,tube=None,chamber=None):
1039  self.connect("write","data")
1040  sql_tube_v = "DELETE FROM %s.MDT_TUBE_V WHERE HEAD_ID=%s" % (self.dschema, self.head_id)
1041  sql_tube_c = "DELETE FROM %s.MDT_TUBE_C WHERE HEAD_ID=%s" % (self.dschema, self.head_id)
1042  sql_tube = "DELETE FROM %s.MDT_TUBE WHERE HEAD_ID=%s" % (self.dschema, self.head_id)
1043  if chamber != None:
1044  chamber_id = self.get_tube_id(chamber)
1045  sql_tube += " AND CHAMBER=%s" % chamber_id
1046  chamber_tube_sql = " AND tube_id IN (SELECT tube_id FROM %s.MDT_TUBE WHERE chamber=%s)" % (self.dschema,chamber_id)
1047  sql_tube_v += chamber_tube_sql
1048  sql_tube_c += chamber_tube_sql
1049  elif tube != None and chamber == None:
1050  tube_id = self.get_tube_id(tube)
1051  sql_tube_v += " AND TUBE_ID=%s" % tube_id
1052  sql_tube_c += " AND TUBE_ID=%s" % tube_id
1053  sql_tube += " AND TUBE_ID=%s" % tube_id
1054 
1055  self.dbgout("SQL from delete_t0: %s" % sql_tube_v)
1056  self.dbgout("SQL from delete_t0: %s" % sql_tube_c)
1057  self.dbgout("SQL from delete_t0: %s" % sql_tube)
1058 
1059  try:
1060  self.exec_delete(sql_tube_v)
1061  self.exec_delete(sql_tube_c)
1062  self.exec_delete(sql_tube)
1063  except Exception as exc:
1064  raise DeleteError(self,exc)
1065 
1066  def set_rt_valid(self,tube=None):
1067  self.connect("write","data")
1068  sql_valid = "UPDATE %s.MDT_RT SET VALIDFLAG=3 WHERE HEAD_ID=%s " % (self.dschema, self.head_id)
1069  if tube != None:
1070  sql_valid += "AND REGION_ID=%s" % tube
1071 
1072  self.dbgout("SQL from set_rt_valid: %s" % sql_valid)
1073 
1074  try:
1075  self.exec_insert(sql_valid)
1076  except Exception as exc:
1077  raise UpdateError(self,exc)
1078 
1079  def set_t0_valid(self,tube=None):
1080  self.connect("write","data")
1081  sql_valid = "UPDATE %s.MDT_TUBE SET VALIDFLAG=3 WHERE HEAD_ID=%s " % (self.dschema, self.head_id)
1082  if tube != None:
1083  sql_valid += "AND TUBE_ID=%s" % tube
1084 
1085  self.dbgout("SQL from set_t0_valid: %s" % sql_valid)
1086 
1087  try:
1088  self.exec_insert(sql_valid)
1089  except Exception as exc:
1090  raise DeleteError(self,exc)
1091 
1092  def fetch_head_data(self):
1093  self.connect()
1094  sql = "SELECT HEAD_ID,IMPLEMENTATION,LOWRUN,UPRUN,LOWTIME,UPTIME,LUMINOSITY,SITE_NAME,INSERT_TIME FROM %s.MDT_HEAD ORDER BY HEAD_ID DESC" % (self.mschema)
1095 
1096  self.dbgout("SQL from fetch_head_data: %s" % sql)
1097 
1098  try:
1099  self.mcursor.execute(sql)
1100  except Exception as exc:
1101  raise QueryError(self,exc)
1102  #"%-8s\t%-30s\t%-8s\t%-8s\t%-10s\t%-10s\t%-2s\t%-2s\t%-28s\n"
1103  #"%-8s%-30s%-8s%-8s%-10s%-10s%-2s%-2s%-28s\n"
1104  head_row = self.mcursor.fetchone()
1105  head_data_rows = []
1106  while head_row:
1107  head_data_rows.append(head_row)
1108  head_row = self.mcursor.fetchone()
1109  return head_data_rows
1110 
1111  def format_head_data(self,head_data_rows):
1112  text = "%-10s%-35s%-10s%-10s%-12s%-12s%-5s%-6s%-30s\n" % ('HEAD_ID','IMPLEMENTATION','LOWRUN','UPRUN','LOWTIME','UPTIME','LUM','SITE','INSERT TIME')
1113  for data in head_data_rows:
1114  text += "%-10s%-35s%-10s%-10s%-12s%-12s%-5s%-6s%-30s\n" % (data[0],data[1],data[2],data[3],data[4],data[5],data[6],data[7],data[8])
1115  return text
1116 
1117 
1118 
1119 
1120 
1121 
1122 
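For orientation, the following is a minimal read-only usage sketch written for this documentation page; it is not part of CalibDataClass.py. It assumes a working CalibDbInfo.py configuration (database entries and reader credentials), and the head id value 1 is purely a placeholder.

#!/bin/env python
# Illustrative sketch only: read calibration metadata with CalibData.
from __future__ import print_function
from CalibDataClass import CalibData, CalibDataError

try:
    # static helper: list the databases configured in CalibDbInfo (no connection needed)
    print(CalibData.format_dblist())

    # open an object for an existing head id (placeholder value) and fetch its
    # MDT_HEAD / MDT_DATA_SCHEMA row from the metadata schema
    cd = CalibData(head_id=1)
    cd.get_head_info()
    print(cd.format_headinfo())

    # or list every head id stored in the metadata schema
    print(cd.format_head_data(cd.fetch_head_data()))
except CalibDataError as exc:
    print("Calibration database access failed:", exc)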
Definition: CalibDataClass.py:51