ATLAS Offline Software
CalibDataClass.py
1 #!/bin/env python
2 
3 
4 import cx_Oracle
5 import sys
6 import traceback
7 import time
8 import datetime
9 import calendar
10 import re
11 import os
12 import subprocess
13 
14 # database configuration info
15 import CalibDbInfo
16 
17 # if ATLAS env is initialized import from there but fallback to copy in working dir if available
18 try:
19  from MuonCalibIdentifier.MuonFixedIdUnpack import *  # line 19 lost in extraction; ATLAS-release import path assumed
20 except ImportError:
21  from MuonFixedIdUnpack import *
22 
23 class CalibDataError(Exception):
24  def __init__(self, caller, exc=None):
25  if caller.debug == True:
26  caller.ptrace()
27  if isinstance(exc,cx_Oracle.Error):
28  error, = exc.args
29  if isinstance(error,str):
30  self.args += error,
31  else:
32  self.args += error.message,
33  return
34  if isinstance(exc,str):
35  self.args += exc,
36  return
37  if isinstance(exc,Exception):
38  self.args = exc.args
39  return
40 
41 class HeadInsertError(CalibDataError): pass  # these ten classes (lines 41-50) are assumed to be plain CalibDataError subclasses
42 class RTInsertError(CalibDataError): pass
43 class T0InsertError(CalibDataError): pass
44 class ADCInsertError(CalibDataError): pass
45 class MapInsertError(CalibDataError): pass
46 class DeleteError(CalibDataError): pass
47 class QueryError(CalibDataError): pass
48 class UpdateError(CalibDataError): pass
49 class DataUniqueError(CalibDataError): pass
50 class DataCopyError(CalibDataError): pass
51 
52 class CalibData:
53  service_re = re.compile(r'\(SERVICE_NAME\s*\=\s*([A-Za-z.]+)\)+')
54  # unix timestamp
55  unixts = re.compile('[0-9]{10,11}')
56 
57  def __init__(self,head_id=None,impl=None,lr=None,ur=None,lt=None,ut=None,luminosity=None,rootfile=None,statusflag=None):
58 
59  # counter of operations not yet committed
60  self.opcount = 0
61  # committed operations
62  self.optotal = 0
63  # increments when transaction is committed
64  self.transtotal = 0
65  # total time for database ops (seconds)
66  self.dbtime = { 'wtime': 0, 'ptime': 0 }
67  #internal timer used to calculate timing intervals
68  self.timer = { 'wtime':0, 'ptime': 0 }
69  # variable to "lock" timer if engaged
70  self.timelock = False
71 
72  # break operations up into transactions less than maxops?
73  # this really only fully applies to delete and batch insert operations from files (not t0 or rt copying operations).
74  # For a copy operation, if maxops is less than the number of rows inserted into a single table at once then
75  # it will cause operations to be broken up into a single transaction for each insert (so insert into MDT_TUBE, commit, then MDT_TUBE_C, commit, etc)
76  # It's always been broken for the case of batch copies, and there doesn't seem any point to fixing it now since we've done
77  # away with Oracle streams and don't need the per-transaction row limiting enabled - in fact breaking any mass insert/copy up into multiple
78  # transactions is certainly a bad idea which would never have happened except for the issues that large transactions cause with Oracle Streams.
79  self.limit = False
80  self.maxops = 10000
81 
82  self.debug = False
83 
84  if lr == None:
85  self.lowrun = 1
86  else:
87  self.lowrun = lr
88 
89  if ur == None:
90  self.uprun = 999999
91  else:
92  self.uprun = ur
93 
94  if lt == None:
95  lt = "19700101 00:00:00"
96  if ut == None:
97  ut = "20690721 00:37:33"
98 
99  if self.unixts.match(str(lt)) != None:
100  self.lowtime_string = datetime.datetime.fromtimestamp(int(lt))
101  self.lowtime = int(lt)
102  else:
103  lt = lt.replace("-","")
104  lowtime_converted = time.strptime(lt, "%Y%m%d %H:%M:%S")
105  self.lowtime = calendar.timegm(lowtime_converted)
106  self.lowtime_string = lt
107 
108  if self.unixts.match(str(ut)) != None:
109  self.uptime_string = datetime.datetime.fromtimestamp(int(ut))
110  self.uptime = int(ut)
111  else:
112  ut = ut.replace("-","")
113  uptime_converted = time.strptime(ut, "%Y%m%d %H:%M:%S")
114  self.uptime = calendar.timegm(uptime_converted)
115  self.uptime_string = ut
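 # Illustrative note (not part of the original source): the lower/upper time arguments
 # may be given either as a 10-11 digit unix timestamp or as a "YYYYMMDD HH:MM:SS"
 # string (dashes are stripped before parsing), e.g.
 #   CalibData(lt=1262304000, ut="2010-12-31 23:59:59")
 #   CalibData(lt="20100101 00:00:00")        # converted via calendar.timegm()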
116 
117  if luminosity == None:
118  self.luminosity = 1.0
119  else:
120  self.luminosity = luminosity
121 
122  if rootfile == None:
123  self.rootfile = "calib_fit.root"
124  else:
125  self.rootfile = rootfile
126 
127  if statusflag == None:
128  self.statusflag = "NULL"
129  else:
130  self.statusflag = statusflag
131 
132  self.implementation = impl
133 
134  self.type = None
135  self.calibflag = 0
136  self.insert_time = None
137 
138  # static rt "dummy" vars
139  self.histogram = ""
140  self.n_segs = 10000
141  self.aver_angle = 0
142  self.delta_aver_angle = 0  # line lost in extraction; dummy value assumed to match aver_angle
143  self.aver_spread = 9.99989986
144  self.delta_aver_spread = 9.99989986
145  self.convergence = 0
146  self.b_aver = 0
147  self.t_aver = 0
148  self.hv = 0
149  self.curr = 0
150  self.gas = 0
151  self.validflag = 3
152  self.bins = 100
153 
154  # t0 related
155  # this can't be global, i think
156  self.t0_tube_grouping = None
157  self.tzeros = []
158 
159  id,db = self.parse_id_arg(head_id)
160 
161  self.head_id = id
162 
163  if db != None:
164  self.setdb(db)
165  else:
166  self.setdb(CalibDbInfo.calibdb)
167 
168  self.schema_active = None
169  self.schema_archived = None
170  self.schema_archive_connection_string = None
171  self.mconn = None
172  self.dconn = None
173  self.dcursor = None
174  self.mcursor = None
175  self.mschema = None
176  self.dschema = None
177  self.writer_account = None
178  self.dbuser = None
179  self.proxyuser = True
180  self.linkid = None
181 
182  def __del__(self):
183  self.drop_dblink()
184  if self.dconn != None:
185  if self.debug == True:
186  print("Closing calibration database connection")
187  self.dconn.close()
188  if self.mconn != None and self.replica == False:
189  if self.debug == True:
190  print("Closing metadata database connection")
191  try:
192  self.mconn.close()
193  except cx_Oracle.InterfaceError as exc:
194  # IF there is only one consolidated schema then dconn and mconn point to the same connection
195  # ignore a 'not connected' error from trying to close that connection twice
196  if 'not connected' in str(exc):
197  pass
198 
199 
200  def ptrace(self):
201  if self.debug == True:
202  traceback.print_exc()
203 
204  @staticmethod
205  def format_dblist():
206 
207  dblist = '%-10s %-28s %-28s \n' % ("ID","SERVICE","SCHEMA")
208  dblist += '---------------------------------------------------------------\n'
209  for ldb in CalibDbInfo.databases.keys():
210  if ldb == CalibDbInfo.calibdb:
211  isdefault = "(Default)"
212  else:
213  isdefault = ""
214  if ldb not in CalibDbInfo.db.keys():
215  schema = "undefined"
216  else:
217  schema = CalibDbInfo.db[ldb]
218  dblist += "%-10s %-28s %-28s %s \n" % (ldb,CalibData.service_re.search(CalibDbInfo.databases[ldb]).group(1),schema,isdefault)
219  return dblist
220 
221  def setdb(self,dbid):
222  if dbid in CalibDbInfo.databases:
223  self.database = dbid
224  self.sitename = CalibDbInfo.sitename[self.database]
225  self.tnsname = CalibDbInfo.databases[self.database]
226  self.replica = CalibDbInfo.replica[self.database]
227  self.service = self.service_re.search(self.tnsname).group(1)
228 
229  if self.service == None:
230  raise Exception("SERVICE_NAME not found in databases[%s]" % dbid)
231  else:
232  raise Exception("Tried to set CalibData object to unknown database %s" % (dbid))
233 
234  def dbgout(self, output):
235  if self.debug:
236  print (output + '\n')
237 
238  # returns the name of the first attribute in the list that is unset (None), or False if all are set
239  def missing_attributes(self,checkvars):
240  required = dict( (name,getattr(self,name)) for name in checkvars)
241  for varname,var in required.items():
242  if var == None:
243  return varname
244  return False
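 # Illustrative only (not in the original source):
 #   self.missing_attributes(['implementation','lowrun'])  ->  'implementation' if it is still None
 #   ->  False once every listed attribute has a value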
245 
246  def userstrings(self,access='read',schema='meta'):
247  var = { 'write': 'dbw', 'read': 'dbr' }
248  rval = { }
249  attr = var[access]
250  passattr = var[access] + '_password'
251  proxyattr = var[access] + '_proxy'
252  obj = getattr(CalibDbInfo,attr)
253  passobj = getattr(CalibDbInfo,passattr)
254  proxyobj = getattr(CalibDbInfo,proxyattr)
255  rval['password'] = passobj[self.database]
256 
257  if schema == 'meta' or (schema == 'data' and access == 'read'):
258  rval['user'] = obj[self.database]
259  try:
260  rval['user'] += '[' + proxyobj[self.database] + ']'
261  except KeyError:
262  pass
263  return rval
264 
265  if schema == 'data':
266  if self.dschema == None:
267  self.set_active_db()
268  try:
269  # see if there is an _proxy array with index for this database. If so
270  # continue on to set the user using queried writer_account under proxy user
271  # if exception, use queried writer account as-is.
272  # we are assuming that if the meta table writer used a proxy account then so does this one (info is not discoverable from the database)
273  test = proxyobj[self.database]
274  rval['user'] = obj[self.database] + '[' + self.writer_account + ']'
275  except KeyError:
276  rval['user'] = self.writer_account
277  pass
278 
279  return rval
280 
281  # access can be 'read' or 'write'
282  # there is a potential bug here - if a read connection is requested after a write connection
283  # we will close the write connection with uncommitted data to open the read connection
284  # It's ok as long as we are careful not to do anything in that order inside the class (users of class shouldn't be able to cause it)
285  def connect(self,access='read',schema='meta'):
286  try:
287  userinfo = self.userstrings(access,schema)
288  userstring = userinfo['user']
289  userpass = userinfo['password']
290  cstring = userstring + "/" + userpass + "@" + self.tnsname
291 
292  dbname = CalibDbInfo.db[self.database] if schema == 'meta' else 'queried data schema'
293 
294  dbgstring = '%s %s connection to %s requested - doing nothing and keeping existing open' % (access,schema,dbname)
295  dbgstring2 = 'New connection required - closing %s schema database connection' % schema
296  dbgstring3 = 'Opening connection with connection information: %s' % cstring
297 
298  if schema == 'meta':
299  if self.mconn != None:
300  if self.metauser == userstring:
301  self.dbgout(dbgstring)
302  return
303  self.dbgout(dbgstring2)
304  self.mconn.close()
305 
306  self.dbgout(dbgstring3)
307  self.mconn = cx_Oracle.connect(cstring)
308  self.mcursor = self.mconn.cursor()
309  self.metauser = userstring
310  self.mschema = CalibDbInfo.db[self.database]
311  self.set_active_db()
312 
313  elif schema == 'data':
314  # it is possible the same user is used for writing both schemas - don't open a new connection in that case
315  if self.metauser == userstring and self.mconn != None:
316  self.dconn = self.mconn
317  self.dcursor = self.dconn.cursor()
318  self.datauser = userstring
319 
320  if self.dconn != None:
321  if self.datauser == userstring:
322  self.dbgout(dbgstring)
323  return
324  self.dbgout(dbgstring2)
325  self.dconn.close()
326 
327  self.dbgout(dbgstring3)
328  self.dconn = cx_Oracle.connect(cstring)
329  self.dcursor = self.dconn.cursor()
330  self.datauser = userstring
331 
332  except Exception as exc:
333  output = "There was an error connecting to the database "
334  if self.mschema != None and schema == "meta":
335  output += "Schema: %s" % self.mschema
336  if self.dschema != None and schema == "data":
337  output += "Schema: %s" % self.dschema
338  output += "\n"
339  if isinstance(exc,cx_Oracle.Error):
340  error, = exc.args
341  output += error.message
342  elif isinstance(exc,KeyError):
343  output += "There is configuration missing for database identifier '%s' - check CalibDbInfo.py" % self.database
344  else:
345  output += exc.args[0]
346  raise CalibDataError(self,output)
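 # Illustrative call patterns (not in the original source): 'meta' connections use the
 # accounts configured in CalibDbInfo, while 'data' write connections use the
 # WRITER_ACCOUNT queried from MDT_DATA_SCHEMA by set_active_db(), e.g.
 #   self.connect()                  # default: read-only metadata connection
 #   self.connect("write", "data")   # writer connection to the active data schema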
347 
348  # only necessary to call this when creating new head_id entries, existing head_id will populate active_schema and writer_account when we retrieve it
349  # note that when retrieving an existing head_id, active_schema in this object doesn't necessarily == database active_schema
350  def set_active_db(self):
351  schema_sql = self.mschema + '.MDT_DATA_SCHEMA'
352  sql = "select WRITER_ACCOUNT,SCHEMA_NAME from %s where ACTIVE = 1 " % (schema_sql)
353 
354  try:
355  self.dbgout("SQL in set_active_db: %s" % sql)
356  self.mcursor.execute(sql)
357  except Exception as exc:
358  raise QueryError(self,exc)
359 
360  result = self.mcursor.fetchone()
361 
362  if result:
363  self.active_schema = result[1]
364  self.writer_account = result[0]
365 
366  def timestart(self):
367  self.timelock = True
368  self.timer['wtime'] = time.time()
369  self.timer['ptime'] = time.clock()
370 
371  def timestop(self):
372  if self.timelock == False:
373  raise Exception("Called timestop without calling timestart")
374  self.timelock = False
375  welap = time.time() - self.timer['wtime']
376  pelap = time.clock() - self.timer['ptime']
377  self.dbtime['wtime'] += welap
378  self.dbtime['ptime'] += pelap
379 
380  # what I think I really want to do here is write an "exec" function that timestarts/timestops for all operations and takes as argument what type of op to exec
381  # so if necessary it can adjust SQL or other to stay within transaction limits. What I have is good enough for now, just a bit redundant.
382 
383  def exec_delete(self,sql,ctype="data"):
384  loop = True
385  rowcount = 0
386  self.timestart()
387  if self.limit == True:
388  sql_del_limit = " AND ROWNUM < %s" % (self.maxops + 1)
389  else:
390  sql_del_limit = ""
391  while loop:
392  esql = sql + sql_del_limit
393  self.dbgout("SQL executed in exec_delete: %s" % esql)
394  if ctype == "data":
395  self.dcursor.execute(esql)
396  if self.dcursor.rowcount > 0:
397  self.opcount += self.dcursor.rowcount
398  rowcount = self.dcursor.rowcount
399  elif ctype == "meta":
400  self.mcursor.execute(esql)
401  if self.mcursor.rowcount > 0:
402  self.opcount += self.mcursor.rowcount
403  rowcount = self.mcursor.rowcount
404  if rowcount > 0:
405  rowcount = 0
406  self.commit()
407  loop = True
408  else:
409  loop = False
410  self.timestop()
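 # Illustrative only (not in the original source): with self.limit True and the default
 # self.maxops of 10000, exec_delete appends " AND ROWNUM < 10001" to the statement and
 # commits after each batch until no rows remain, e.g.
 #   self.exec_delete("DELETE FROM %s.MDT_RT WHERE HEAD_ID=%s" % (self.dschema, self.head_id))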
411 
412  def exec_insert(self,sql,ctype="data"):
413  self.timestart()
414  if ctype == "data":
415  self.dcursor.execute(sql)
416  if self.dcursor.rowcount > 0:
417  self.opcount += self.dcursor.rowcount
418  elif ctype == "meta":
419  self.mcursor.execute(sql)
420  if self.mcursor.rowcount > 0:
421  self.opcount += self.mcursor.rowcount
422  # commit only commits if opcount is >= maxops
423  self.commit()
424  self.timestop()
425 
426  def check_opcount(self):
427  if self.opcount >= self.maxops and self.limit == True:
428  if self.debug:
429  confirm = "y"
430  print ("Ready to commit %s operations" % self.opcount)
431  confirm = raw_input("Enter 'y' or 'yes' if you wish to continue with operation: ")
432  if confirm not in ("y","yes","Y"):
433  raise Exception("User cancelled operation")
434  return True
435  else:
436  return False
437 
438  def commit(self,force=False):
439  did_commit = False
440 
441  try:
442  if self.mconn != None and self.replica == False:
443  if self.check_opcount() or force:
444  self.mconn.commit()
445  did_commit = True
446  self.mcursor = self.mconn.cursor()
447  if self.dconn != None:
448  if self.check_opcount() or force:
449  self.dconn.commit()
450  did_commit = True
451  self.dcursor = self.dconn.cursor()
452  if did_commit:
453  self.optotal += self.opcount
454  self.opcount = 0
455  self.transtotal += 1
456 
457  except Exception as exc:
458  raise CalibDataError(self,exc)
459 
460  def rollback(self):
461  try:
462  if self.mconn != None:
463  if self.mcursor.rowcount > 0:
464  self.opcount += self.mcursor.rowcount
465  self.mcursor.close()
466  self.mconn.rollback()
467  self.mcursor = self.mconn.cursor()
468  if self.dconn != None:
469  if self.dcursor.rowcount > 0:
470  self.opcount += self.dcursor.rowcount
471  try:
472  self.dcursor.close()
473  except cx_Oracle.InterfaceError as exc:
474  # sometimes the cursor isn't open if we didn't do anything - not a problem I need to know about
475  if 'not open' in str(exc):
476  pass
477  self.dconn.rollback()
478  self.dcursor = self.dconn.cursor()
479  except Exception as exc:
480  raise CalibDataError(self,exc)
481 
482  # split (if necessary) id argument like headid@database into id and database. returns "None" if no database or None id provided
483  def parse_id_arg(self,id):
484  db = None
485 
486  if id == None:
487  return id,db
488 
489  list = str(id).split('@',1)
490 
491  id = list[0]
492 
493  if len(list) > 1:
494  db = list[1]
495 
496  return id,db
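 # Illustrative only (not in the original source):
 #   self.parse_id_arg("1234@testdb")  ->  ("1234", "testdb")
 #   self.parse_id_arg(1234)           ->  ("1234", None)
 #   self.parse_id_arg(None)           ->  (None, None)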
497 
498  # service is service attribute from source object used in copy operation
499  def setup_dblink(self,srcdb,service):
500 
501  if srcdb != self.database:
502  # The need for @loopback comes up in testing when we copy between databases on the same server. Will not be allowed without specifying @loopback.
503  if service == self.service:
504  lb = '@loopback'
505  else:
506  lb = ''
507 
508  linkid = service + lb
509 
510  dblink_sql = "CREATE DATABASE LINK %s CONNECT TO %s identified by %s using '%s'" % (linkid, CalibDbInfo.dbr[srcdb],CalibDbInfo.dbr_password[srcdb],CalibDbInfo.databases[srcdb])
511  check_sql = "SELECT DB_LINK from USER_DB_LINKS where lower(DB_LINK) like '%s%s'" % (linkid,'%');
512 
513  if self.linkid == None:
514  try:
515  self.connect("write","meta")
516  self.dbgout ("SQL in setup_dblink: %s" % check_sql)
517  self.mcursor.execute(check_sql)
518  self.linkid = self.mcursor.fetchone()
519  if self.linkid != None:
520  self.drop_dblink()
521  self.dbgout("SQL in setup_dblink: %s" % dblink_sql)
522  self.mcursor.execute(dblink_sql)
523  self.linkid = linkid
524  except Exception as exc:
525  raise CalibDataError(self,exc)
526 
527  def drop_dblink(self):
528  if self.linkid != None:
529  drop_sql = "DROP DATABASE LINK %s" % self.linkid
530  self.dbgout("Deleting database link")
531  self.connect("write","meta")
532  try:
533  self.dbgout("SQL in drop_dblink: %s" % drop_sql)
534  self.mcursor.execute(drop_sql)
535  except Exception as exc:
536  raise CalibDataError(self,exc)
537 
538  def format_headinfo(self):
539  info = "Implementation name: %s \n" % self.implementation
540  info += "Lower Time IOV: %s, %s epoch seconds \n" % (self.lowtime_string, self.lowtime)
541  info += "Upper Time IOV: %s, %s epoch seconds \n" % (self.uptime_string, self.uptime)
542  info += "Lower Run IOV: %s \n" % self.lowrun
543  info += "Upper Run IOV: %s \n" % self.uprun
544  info += "Luminosity: %s \n" % self.luminosity
545  info += "Rootfile: %s \n" % self.rootfile
546  info += "Sitename: %s \n" % self.sitename
547  info += "Insert Time: %s \n" % self.insert_time
548  info += "Status Flag: %s \n" % self.statusflag
549  return info
550 
551  # this is meant to be used to extract some valid tube/chamber identifier from any given string containing one. Intended use is for uploading
552  # from batches of files where each filename identifies a tube
553  @staticmethod
554  def extract_tubestring(fullstring):
555  mid = MuonFixedIdUnpack()
556  hardware_tubestring = mid.online_re.search(fullstring)
557  software_tubestring = mid.offline_re.search(fullstring)
558  numeric_tubestring = mid.numeric_re.search(fullstring)
559 
560  if (hardware_tubestring):
561  return hardware_tubestring.group()
562  if (software_tubestring):
563  return software_tubestring.group()
564  if (numeric_tubestring):
565  # make sure it is not actually a date
566  datematch = re.compile(r'(19|20)\d\d(0[1-9]|1[012])(0[1-9]|[12][0-9]|3[01])')
567  if not datematch.search(fullstring):
568  return numeric_tubestring.group()
569 
570  return False
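 # Illustrative only (not in the original source; the exact patterns live in
 # MuonFixedIdUnpack): pulls a tube/chamber identifier out of a string such as a
 # calibration file name, e.g. a software-style name like "BIL_1_2" embedded in
 # "t0_BIL_1_2_run01234.txt"; purely numeric matches that look like a YYYYMMDD date
 # are rejected, and False is returned when nothing matches.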
571 
572  @staticmethod
573  def convert_tube(tube):
574  exepath = sys.path[0] + "/muonfixedid"
575  retval = subprocess.Popen([exepath,tube], stdout=subprocess.PIPE).stdout.read()
576  if "ERROR" in retval:
577  print (retval)
578  raise Exception
579  else:
580  return retval
581 
582  @staticmethod
583  def get_tube_id(tube):
584  mid=MuonFixedIdUnpack(tube)
585  return mid.identifier
586 
587  @staticmethod
588  def get_tube_string(tube):
589  try:
590  mid = MuonFixedIdUnpack(tube)
591  string = "%s_%s_%s" % (mid.stationNameString(),mid.stationPhi(),mid.stationEta())
592  return string
593  except ValueError:
594  # will throw exception if format doesn't match regex
595  MuonFixedIdUnpack(tube)
596  return tube
597 
598  def get_head_info(self):
599  self.connect("read","meta")
600  sql = "select head.head_id, head.implementation, head.data_schema, sh.SCHEMA_NAME, head.lowrun,head.uprun,head.lowtime,head.uptime, head.luminosity, head.rootfile, head.site_name, head.insert_time, head.status_flag, sh.WRITER_ACCOUNT, sh.ACTIVE, sh.ARCHIVED, sh.ARCHIVE_CONNECTION_STRING from %s.MDT_HEAD head, %s.MDT_DATA_SCHEMA sh where head.DATA_SCHEMA = sh.SCHEMA_NAME AND " % (self.mschema,self.mschema)
601 
602  sql += "head.HEAD_ID = %s " % self.head_id
603 
604  sql += "ORDER BY head.head_id DESC"
605 
606  self.dbgout("SQL from get_head_info: %s" % sql)
607 
608  try:
609  self.mcursor.execute(sql)
610  except Exception as exc:
611  raise QueryError(self,exc)
612 
613  result = self.mcursor.fetchone()
614  if result:
615  self.head_id = result[0]
616  self.implementation = result[1]
617  self.dschema = result[2]
618  self.calibdbwriter = result[3]
619  self.lowrun = result[4]
620  self.uprun = result[5]
621  self.lowtime = result[6]
622  self.uptime = result[7]
623  self.luminosity = result[8]
624  self.rootfile = result[9]
625  self.sitename = result[10]
626  # the result will be a python datetime object which I am converting to the format oracle would expect if we use this value in an INSERT statement
627  self.insert_time = self.convert_datetime_oracle(result[11])
628  self.statusflag = result[12]
629  # this value is used in connection strings
630  self.calibdbwriter = result[13]
631  # this value may be altered and used to write to MDT_DATA_SCHEMA table
632  self.writer_account = result[13]
633  self.schema_active = result[14]
634  self.schema_archived = result[15]
635  if result[16] == None:
636  self.schema_archive_connection_string = "NULL"  # line lost in extraction; "NULL" assumed so write_data_schema() inserts SQL NULL
637  else:
638  self.schema_archive_connection_string = result[16]
639 
640  if self.statusflag == None:
641  self.statusflag = "NULL"
642  else:
643  raise QueryError(self,"No results")
644 
645  self.lowtime_string = datetime.datetime.fromtimestamp(self.lowtime)
646  self.uptime_string = datetime.datetime.fromtimestamp(self.uptime)
647 
648  def convert_datetime_oracle(self,dtobj):
649  #current_time = datetime.datetime.utcnow().strftime("%d-%b-%y %I.%M.%S.%f %p %z")
650  if isinstance(dtobj,datetime.datetime):
651  return dtobj.strftime('%d-%b-%y %I.%M.%S.%f %p %z').upper()
652  else:
653  raise Exception("Not a datetime object")
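 # Illustrative only (not in the original source): the strftime pattern produces an
 # upper-cased Oracle-style timestamp string, e.g.
 #   self.convert_datetime_oracle(datetime.datetime(2020, 5, 17, 13, 45, 30))
 #   ->  roughly '17-MAY-20 01.45.30.000000 PM' (plus the offset for timezone-aware datetimes)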
654 
655  def get_t0(self,tube_id=None,chamber_id=None):
656  # **** I have no idea if this really works anymore
657  self.connect("read","data")
658  sql = "SELECT T.chamber, T.tube_id, T.P4, T.ADC_1, T.validflag, T.nhits_above_adc_cut, V.algo_flag,V.chisquare_1,V.P4_err,V.P5,V.P5_err,V.P0,C.P0_ERR,V.P6,V.p6_err, V.tube_grouping from"+self.dschema + ".MDT_TUBE T, " + self.dschema + ".MDT_TUBE_V V," + self.dschema + ".MDT_TUBE_C C where T.head_id = :hid AND C.head_id = :hid AND V.head_id = :hid"
659  if chamber_id != None:
660  sql += " AND t.chamber = :cid AND C.tube_id IN (SELECT tube_id from %s.MDT_TUBE T WHERE T.chamber = :cid) AND V.tube_id IN (SELECT tube_id from %s.MDT_TUBE T WHERE T.chamber = :cid)" % (self.dschema, self.dschema)
661  if tube_id != None:
662  sql += " AND T.tube_id = :tid AND C.tube_id = :tid AND V.tube_id = :tid"
663 
664  self.dbgout("SQL in get_tzero: %s" % sql)
665 
666  try:
667  self.dcursor.execute(sql, dict((k,v) for (k,v) in (('hid',self.head_id),('cid',chamber_id),('tid',tube_id)) if v != None))  # bind only the variables actually present in the statement
668  except Exception as exc:
669  raise QueryError(self,exc)
670 
671  self.tzeros = []
672  for r in self.dcursor.fetchall():
673  self.tzeros.append(r)
674 
675 
676  def get_rt(self,chamber_id=None):
677  self.connect("read","data")
678  sql = "SELECT HEAD_ID, REGION_ID,CALIBFLAG,LOWRUN,UPRUN,HISTOGRAM,N_SEGS,AVER_ANGLE,DELTA_AVER_ANGLE,AVER_SPREAD,DELTA_AVER_SPREAD,CONVERGENCE,B_AVER,T_AVER,HV,CURR,GAS,VALIDFLAG,ALGO_FLAG,BINSFROM %s.MDT_RT" % (self.dschema )
679  if chamber_id != None:
680  sql += " AND region_id = :rid"
681  try:
682  self.dcursor.execute(sql,{'rid':chamber_id})
683  except Exception as exc:
684  raise QueryError(self,exc)
685  rowcount = 0
686  for r in self.cursor.fetchall():
687  self.rt[rowcount] = r
688  rowcount+=1
689 
690  def check_copy_args(self,sourceobj,fn):
691  if not isinstance(sourceobj,CalibData):
692  raise Exception("Called %s without providing copy source CalibData object" % fn)
693  if sourceobj.mschema == None:
694  raise Exception("CalibData object passed to %s not initialized with get_head_info() method" % fn)
695 
696  # used either in copy to replica or to write a new schema entry as independent operation
697  def write_data_schema(self):
698  if self.dschema == None or self.writer_account == None or self.schema_active == None or self.schema_archived == None or self.schema_archive_connection_string == None:
699  raise Exception("Required members in CalibData object not set, cannot check or write data schema.\nself.dschema: %s, self.writer_account: %s, self.schema_active: %s, self.schema_archived: %s self.schema_archive_connection_string: %s" % (self.dschema,self.writer_account,self.schema_active,self.schema_archived,self.schema_archive_connection_string))
700 
701  sql_exists = "SELECT SCHEMA_NAME from %s.MDT_DATA_SCHEMA WHERE SCHEMA_NAME = '%s'" % (self.mschema, self.dschema)
702 
703  sql_insert = "INSERT INTO %s.MDT_DATA_SCHEMA VALUES ('%s','%s',%s,%s,%s)" % (self.mschema,self.dschema,self.writer_account,self.schema_active,self.schema_archived,self.schema_archive_connection_string)
704 
705  self.dbgout("SQL from write_data_schema: %s" % sql_exists)
706 
707  try:
708  self.mcursor.execute(sql_exists)
709  self.mcursor.fetchall()
710  if self.mcursor.rowcount > 0:
711  if self.replica == False:
712  raise Exception(self,"Tried to write new MDT_DATA SCHEMA %s but SCHEMA_NAME exists" % self.dschema)
713  else:
714  return
715  except Exception as e:
716  raise QueryError(self,e)
717 
718 
719  self.dbgout("SQL from write_data_schema: %s" % sql_insert)
720 
721  try:
722  self.mcursor.execute(sql_insert)
723  except Exception as exc:
724  raise HeadInsertError(self,"Unable to insert into MDT_DATA_SCHEMA: %s" % exc)
725 
726 
727  def write_headid(self):
728 
729  required = dict( (name,getattr(self,name)) for name in [ 'implementation', 'lowrun', 'uprun', 'lowtime', 'uptime', 'luminosity', 'rootfile', 'sitename' ])
730  for varname,var in required.items():
731  if var == None:
732  raise HeadInsertError(self,"Write head ID - %s was not set, cannot create new id" % varname)
733 
734  if self.replica and self.head_id == None:
735  raise HeadInsertError(self,"Cannot insert new meta information into replica database because self.head_id is not set")
736 
737  self.connect("write","meta")
738  newid = self.mcursor.var(cx_Oracle.NUMBER)
739  schema = self.mcursor.var(cx_Oracle.STRING)
740 
741  if self.head_id == None or self.replica:
742  if self.head_id == None:
743  self.head_id = 'NULL'
744  if self.dschema == None:
745  self.dschema = 'NULL'
746  if self.insert_time == None:
747  self.insert_time = 'NULL'
748  sql = "INSERT INTO %s.MDT_HEAD(HEAD_ID,IMPLEMENTATION, LOWRUN, UPRUN, LOWTIME, UPTIME, LUMINOSITY, ROOTFILE, SITE_NAME, DATA_SCHEMA,INSERT_TIME,STATUS_FLAG) VALUES(%s,'%s',%s,%s,%s,%s,%s,'%s','%s'," % (self.mschema, self.head_id, self.implementation, self.lowrun, self.uprun, self.lowtime, self.uptime, self.luminosity, self.rootfile,self.sitename)
749  if self.replica:
750  # if we're actually inserting values copied from the source to replica we'll need to quote these strings
751  sql += "'%s','%s'," % (self.dschema,self.insert_time)
752  else:
753  # non-replica (site) calibdbs have a trigger to set schema and insert time so we insert unquoted NULL on those columns
754  sql += "%s,%s," % (self.dschema,self.insert_time)
755  sql += "%s) RETURNING head_id,data_schema into :newid,:schema" % (self.statusflag)
756 
757  update = False
758  else:
759  sql = "UPDATE %s.MDT_HEAD SET IMPLEMENTATION='%s', LOWRUN=%s, UPRUN=%s, LOWTIME=%s, UPTIME=%s, LUMINOSITY=%s,ROOTFILE='%s',SITE_NAME='%s', INSERT_TIME = '%s', STATUS_FLAG=%s WHERE HEAD_ID=%s" % (self.mschema, self.implementation,self.lowrun,self.uprun,self.lowtime,self.uptime,self.luminosity,self.rootfile,self.sitename,self.insert_time,self.status_flag, self.head_id)
760  update = True
761 
762  self.dbgout("SQL from write_headid: %s" % sql)
763  try:
764  if update:
765  self.exec_insert(sql,"meta")
766  else:
767  # not using exec_insert to avoid passing bind variables (this is the only place it comes up and essentially amounts to counting 1 operation so it probably isn't worth a lot of thought)
768  self.timestart()
769  self.mcursor.execute(sql,{'newid':newid, 'schema':schema})
770  self.timestop()
771  if self.mcursor.rowcount > 0:
772  self.opcount += self.mcursor.rowcount
773  self.head_id = int(newid.getvalue())
774  if self.replica == False:
775  self.dschema = schema.getvalue()
776  except Exception as exc:
777  raise HeadInsertError(self,exc)
778 
779  def copy_head(self,sourceobj=None):
780  self.connect("write","meta")
781  self.check_copy_args(sourceobj,"copy_head")
782  fromid = sourceobj.head_id
783  srcdb_id = sourceobj.database
784  self.setup_dblink(srcdb_id, sourceobj.service)
785 
786  if self.replica:
787  self.head_id = sourceobj.head_id
788  self.dschema = CalibDbInfo.schema[self.database]
789  self.insert_time = sourceobj.insert_time
790 
791  self.implementation = sourceobj.implementation
792  self.lowrun = sourceobj.lowrun
793  self.uprun = sourceobj.uprun
794  self.lowtime = sourceobj.lowtime
795  self.uptime = sourceobj.uptime
796  self.luminosity = sourceobj.luminosity
797  self.rootfile = sourceobj.rootfile
798  self.sitename = sourceobj.sitename
799  self.statusflag = sourceobj.statusflag
800 
801  self.schema_active = sourceobj.schema_active
802  self.schema_archived = sourceobj.schema_archived
803  self.schema_archive_connection_string = sourceobj.schema_archive_connection_string
804 
805  #if not self.replica:
806  # self.writer_account = sourceobj.writer_account
807 
808  if self.replica:
809  self.write_data_schema()
810 
811  self.write_headid()
812 
813  if self.replica:
814  if int(self.head_id) != int(sourceobj.head_id):
815  raise HeadInsertError(self,"Destination is configured as 'replica' but got new head_id %s from insert when expecting id %s - cannot replicate head_id from source. Check your CalibDbInfo.py configuration." % (self.head_id, sourceobj.head_id))
816 
817  def copy_rt(self,sourceobj,chamber=None,ignore_missing=False):
818  self.check_copy_args(sourceobj,"copy_rt")
819 
820  fromid = sourceobj.head_id
821  srcdb_id = sourceobj.database
822  self.setup_dblink(srcdb_id,sourceobj.service)
823 
824  self.connect("write","data")
825 
826  if self.head_id == None and self.linkid == None:
827  raise Exception("Cannot copy RT in same database without setting head_id for CalibData object")
828  elif self.replica == False or self.linkid == None:
829  id_select = self.head_id
830  else:
831  id_select = "head_id"
832 
833  schema_sql = "%s.MDT_RT" % sourceobj.dschema
834  schema_sql_map = schema_sql + "_MAP"
835 
836  if self.linkid != None:
837  schema_sql += "@%s" % self.linkid
838  schema_sql_map += "@%s" % self.linkid
839 
840  sql_rt = "INSERT INTO %s.MDT_RT (HEAD_ID, REGION_ID,CALIBFLAG,LOWRUN,UPRUN,HISTOGRAM,N_SEGS,AVER_ANGLE,DELTA_AVER_ANGLE,AVER_SPREAD,DELTA_AVER_SPREAD,CONVERGENCE,B_AVER,T_AVER,HV,CURR,GAS,VALIDFLAG,ALGO_FLAG,BINS) SELECT %s,REGION_ID,CALIBFLAG,LOWRUN,UPRUN,HISTOGRAM,N_SEGS,AVER_ANGLE,DELTA_AVER_ANGLE,AVER_SPREAD,DELTA_AVER_SPREAD,CONVERGENCE,B_AVER,T_AVER,HV,CURR,GAS,VALIDFLAG,ALGO_FLAG,BINS FROM %s WHERE HEAD_ID = %s" % (self.dschema, id_select, schema_sql, fromid)
841 
842  sql_rt_map = "INSERT INTO %s.MDT_RT_MAP(HEAD_ID, REGION_ID,CALIBFLAG,POINT_NR,S,T,R) SELECT %s,REGION_ID,CALIBFLAG,POINT_NR,S,T,R FROM %s WHERE HEAD_ID = %s" % (self.dschema, id_select, schema_sql_map, fromid)
843 
844  if chamber != None:
845  chamber_id = self.get_tube_id(chamber)
846  chamber_sql = " AND region_id = %s" % chamber_id
847  sql_rt += chamber_sql
848  sql_rt_map += chamber_sql
849 
850  self.dbgout("SQL in copy_rt: %s" % sql_rt_map)
851 
852  try:
853  self.dbgout("SQL in copy_rt: %s" % sql_rt)
854  self.exec_insert(sql_rt)
855  self.dbgout("SQL in copy_rt: %s" % sql_rt_map)
856  self.exec_insert(sql_rt_map)
857  if self.dcursor.rowcount == 0 and ignore_missing == False:
858  raise DataCopyError(self,"There is no RT data in the source head id")
859  except cx_Oracle.IntegrityError as exc:
860  raise DataUniqueError(self,exc)
861  except HeadInsertError:
862  raise
863  except Exception as exc:
864  raise DataCopyError(self,exc)
865 
866  # copy existing T0 to this object. new head_id must exist (use write_headid if object created with empty headid)
867  def copy_tzero(self,sourceobj=None,chamber=None,tube=None,ignore_missing=False):
868 
869  self.check_copy_args(sourceobj,"copy_tzero_sql")
870 
871  fromid = sourceobj.head_id
872  srcdb_id = sourceobj.database
873  self.setup_dblink(srcdb_id,sourceobj.service)
874 
875  self.connect("write","data")
876 
877  if self.head_id == None and self.linkid == None:
878  raise Exception("Cannot copy tzero in same database without setting head_id for CalibData object")
879  elif self.replica == False or self.linkid == None:
880  id_select = self.head_id
881  else:
882  id_select = "head_id"
883 
884  schema_sql = "%s.MDT_TUBE" % sourceobj.dschema
885  schema_sql_tv = schema_sql + "_V"
886  schema_sql_tc = schema_sql + "_C"
887 
888  if self.linkid != None:
889  schema_sql += "@%s" % self.linkid
890  schema_sql_tv += "@%s" % self.linkid
891  schema_sql_tc += "@%s" % self.linkid
892 
893  mdt_tube_sql = "INSERT INTO %s.MDT_TUBE SELECT tube_id,%s,chamber,calibflag,validflag,nhits,nhits_above_adc_cut,p4,adc_0,adc_1,adc_2,adc_3,insert_time FROM %s WHERE head_id = %s" % (self.dschema,id_select,schema_sql,fromid)
894 
895  mdt_tube_v_sql = "INSERT INTO %s.MDT_TUBE_V (head_id,tube_id,calibflag, chisquare_1, chisquare_2, p0, p1, p2, p3, p5, p6, p7, p8, p9, p4_err, p5_err, p6_err, algo_flag, entries, tube_grouping) SELECT %s, tube_id,calibflag, chisquare_1, chisquare_2, p0, p1, p2, p3, p5, p6, p7, p8, p9, p4_err, p5_err, p6_err, algo_flag, entries, tube_grouping FROM %s WHERE head_id = %s" % (self.dschema,id_select,schema_sql_tv,fromid)
896 
897  mdt_tube_c_sql = "INSERT INTO %s.MDT_TUBE_C (head_id, tube_id,calibflag, p0_err, p1_err, p2_err, p3_err, p7_err, p8_err, p9_err, cov_1, cov_2, cov_3, cov_4, adc_0_err, adc_1_err, adc_2_err, adc_3_err, adc_chisquare, adc_pedestal, adc_pedestal_width) SELECT %s,tube_id,calibflag, p0_err, p1_err, p2_err, p3_err, p7_err, p8_err, p9_err, cov_1, cov_2, cov_3, cov_4, adc_0_err, adc_1_err, adc_2_err, adc_3_err, adc_chisquare, adc_pedestal, adc_pedestal_width FROM %s WHERE head_id = %s" % (self.dschema,id_select,schema_sql_tc,fromid)
898 
899  if chamber != None and tube == None:
900  chamber_id = self.get_tube_id(chamber)
901  chamber_sql = " AND chamber = %s" % chamber_id
902  chamber_tube_sql = " AND tube_id IN (SELECT tube_id FROM %s WHERE chamber=%s)" % (schema_sql,chamber_id)
903  mdt_tube_sql += chamber_sql
904  mdt_tube_v_sql += chamber_tube_sql
905  mdt_tube_c_sql += chamber_tube_sql
906  elif tube != None and chamber == None:
907  tube_id = self.get_tube_id(tube)
908  tube_sql = " AND tube_id = %s" % tube_id
909  mdt_tube_sql += tube_sql
910  mdt_tube_v_sql += tube_sql
911  mdt_tube_c_sql += tube_sql
912  elif tube != None and chamber != None:
913  raise DataCopyError(self,"Cannot operate on both chamber and single tube")
914 
915  self.dbgout("SQL in copy_tzero: %s" % mdt_tube_sql)
916  self.dbgout("SQL in copy_tzero: %s" % mdt_tube_c_sql)
917  self.dbgout("SQL in copy_tzero: %s" % mdt_tube_v_sql)
918 
919  try:
920  #if chamber == None and tube == None:
921  # self.copy_head(sourceobj)
922  self.exec_insert(mdt_tube_sql)
923  if self.dcursor.rowcount == 0 and ignore_missing == False:
924  raise DataCopyError(self,"There is no T0 data in the source head id")
925  self.exec_insert(mdt_tube_c_sql)
926  self.exec_insert(mdt_tube_v_sql)
927  except cx_Oracle.IntegrityError as exc:
928  raise DataUniqueError(self,exc)
929  except HeadInsertError:
930  raise
931  except cx_Oracle.Error as exc:
932  raise DataCopyError(self,exc)
933 
934  def insert_rt_map(self,point_nr, r, t, s):
935  self.connect("write","data")
936  sql_map = "INSERT INTO %s.MDT_RT_MAP(HEAD_ID, REGION_ID,CALIBFLAG,POINT_NR,S,T,R) VALUES (%s,%s,%s,%s,%s,%s,%s)" % ( self.dschema, self.head_id,self.regionid,self.calibflag,point_nr,s,t,r)
937  if self.debug == True:
938  self.dbgout("SQL from insert_rt_map: %s" % sql_map)
939  try:
940  if point_nr == 99 or point_nr == 199:
941  tmax = float(t)
942  if tmax > 800. or tmax < 700.:
943  raise Exception("Tmax value %s: Tmax must be in range 700 to 800 ns" % t)
944  self.exec_insert(sql_map)
945  except cx_Oracle.IntegrityError as exc:
946  raise DataUniqueError(self,exc)
947  except Exception as exc:
948  raise MapInsertError(self,exc)
949 
950  def insert_rt(self,regionid):
951  if self.type == None:
952  raise RTInsertError(self,"Cannot insert RT until you set object 'type' variable")
953 
954  self.connect("write","data")
955  sql_rt = "INSERT INTO %s.MDT_RT (HEAD_ID, REGION_ID,CALIBFLAG,LOWRUN,UPRUN,HISTOGRAM,N_SEGS,AVER_ANGLE,DELTA_AVER_ANGLE,AVER_SPREAD,DELTA_AVER_SPREAD,CONVERGENCE,B_AVER,T_AVER,HV,CURR,GAS,VALIDFLAG,ALGO_FLAG,BINS) VALUES (%s,%s,%s,%s,%s,'%s',%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,'%s',%s)" % (self.dschema,self.head_id,regionid,self.calibflag,self.lowrun,self.uprun,self.histogram,self.n_segs,self.aver_angle,self.delta_aver_angle,self.aver_spread,self.delta_aver_spread,self.convergence,self.b_aver,self.t_aver,self.hv,self.curr,self.gas,self.validflag,self.type,self.bins)
956 
957  self.dbgout("SQL from insert_rt: %s" % sql_rt)
958 
959  try:
960  self.regionid = regionid
961  self.exec_insert(sql_rt)
962  except cx_Oracle.IntegrityError as exc:
963  raise DataUniqueError(self,exc)
964  except Exception as exc:
965  raise RTInsertError(self,exc)
966 
967  def insert_adc(self,tube_id,chamber_id,nhits,adc_0,adc_0_err,adc_1,adc_1_err,adc_2,adc_2_err,adc_3,adc_3_err,adc_chisquare):
968  self.connect("write","data")
969  sql_tube = 'INSERT INTO %s.MDT_TUBE (HEAD_ID,TUBE_ID,CHAMBER,VALIDFLAG,NHITS,NHITS_ABOVE_ADC_CUT,P4,ADC_0,ADC_1,ADC_2,ADC_3) VALUES (%s,%s,%s,0,%s,%s,%s,%s,%s,%s,%s)' % (self.dschema,self.head_id,tube_id,chamber_id,self.validflag,nhits,nhits,adc_0,adc_1,adc_2,adc_3)
970  sql_tube_c = "INSERT INTO %s.MDT_TUBE_C (HEAD_ID,TUBE_ID,ADC_0_ERR,ADC_1_ERR,ADC_2_ERR,ADC_3_ERR,ADC_CHISQUARE) VALUES (%s,%s,%s,%s,%s,%s,%s)" % (self.dschema,self.head_id,tube_id, adc_0_err, adc_1_err, adc_2_err, adc_3_err, adc_chisquare)
971 
972  if self.debug == True:
973  self.dbgout("SQL from insert_adc: %s" % sql_tube)
974  self.dbgout("SQL from insert_adc: %s" % sql_tube_c)
975 
976  try:
977  self.exec_insert(sql_tube)
978  self.exec_insert(sql_tube_c)
979  except cx_Oracle.IntegrityError as exc:
980  raise DataUniqueError(self,exc)
981  except Exception as exc:
982  raise ADCInsertError(self,exc)
983 
984  def insert_t0(self,tube_id,chamber_id, tzero,avg_adc,calibflag,stats,chi2,t0err,tmax,tmax_err,noise,noise_err,slope,slope_err):
985  # ,tzero,avg_adc,calibflag,stats,chi2,t0err,tmax,tmax_err,noise,noise_err,slope,slope_err
986  self.connect("write","data")
987 
988  sql_tube = "INSERT INTO %s.MDT_TUBE (HEAD_ID,TUBE_ID,CALIBFLAG,CHAMBER,P4,ADC_1,VALIDFLAG,NHITS,NHITS_ABOVE_ADC_CUT) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)" % (self.dschema,self.head_id,tube_id,calibflag,chamber_id,tzero,avg_adc,self.validflag,stats,stats)
989 
990  #V.algo_flag,V.chisquare_1,V.P4_err,V.P5,V.P5_err,V.P0,V.P6,V.p6_err, V.tube_grouping)
991  sql_tube_c = "INSERT INTO %s.MDT_TUBE_C (HEAD_ID,TUBE_ID,CALIBFLAG,P0_ERR) VALUES (%s,%s,%s,%s)" % (self.dschema,self.head_id,tube_id,calibflag,noise_err)
992  sql_tube_v = "INSERT INTO %s.MDT_TUBE_V (HEAD_ID,TUBE_ID,CALIBFLAG,CHISQUARE_1,P4_ERR,P5,P5_ERR,P0,P6,P6_ERR,ALGO_FLAG,TUBE_GROUPING) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,'%s','%s')" % (self.dschema,self.head_id,tube_id,calibflag,chi2,t0err,tmax,tmax_err,noise,slope,slope_err,self.type,self.t0_tube_grouping)
993 
994  if self.debug == True:
995  self.dbgout("SQL from insert_t0: %s" % sql_tube)
996  self.dbgout("SQL from insert_t0: %s" % sql_tube_c)
997  self.dbgout("SQL from insert_t0: %s" % sql_tube_v)
998 
999  try:
1000  self.exec_insert(sql_tube)
1001  self.exec_insert(sql_tube_c)
1002  self.exec_insert(sql_tube_v)
1003  except cx_Oracle.IntegrityError as exc:
1004  raise DataUniqueError(self,exc)
1005  except Exception as exc:
1006  raise T0InsertError(self,exc)
1007 
1008  # returns true on success or else throws DeleteError
1009  # I split the mdt_head and mdt_rt/mdt_rt_map just to be more flexible, no commit happens during this function
1010  def delete_head_id(self):
1011  self.connect("write","meta")
1012  sql = "DELETE FROM %s.MDT_HEAD WHERE HEAD_ID=%s and IMPLEMENTATION='%s'" % ( self.mschema, self.head_id, self.implementation)
1013 
1014  self.dbgout("SQL from delete_head_id: %s" % sql)
1015 
1016  try:
1017  self.exec_delete(sql,"meta")
1018  except Exception as exc:
1019  raise DeleteError(self,exc)
1020 
1021  def delete_rt(self,region=None):
1022  self.connect("write","data")
1023  sql_rt_map = "DELETE FROM %s.MDT_RT_MAP WHERE HEAD_ID=%s" % (self.dschema, self.head_id)
1024  sql_rt = "DELETE FROM %s.MDT_RT WHERE HEAD_ID=%s" % (self.dschema, self.head_id)
1025 
1026  if region != None:
1027  # aka as chamber_id
1028  region_id = self.get_tube_id(region)
1029  sql_rt_map += " AND REGION_ID=%s" % region_id
1030  sql_rt += " AND REGION_ID=%s" % region_id
1031  try:
1032  self.exec_delete(sql_rt_map)
1033  self.exec_delete(sql_rt)
1034  except Exception as exc:
1035  raise DeleteError(self,exc)
1036 
1037  def delete_tube(self,tube=None,chamber=None):
1038  self.connect("write","data")
1039  sql_tube_v = "DELETE FROM %s.MDT_TUBE_V WHERE HEAD_ID=%s" % (self.dschema, self.head_id)
1040  sql_tube_c = "DELETE FROM %s.MDT_TUBE_C WHERE HEAD_ID=%s" % (self.dschema, self.head_id)
1041  sql_tube = "DELETE FROM %s.MDT_TUBE WHERE HEAD_ID=%s" % (self.dschema, self.head_id)
1042  if chamber != None:
1043  chamber_id = self.get_tube_id(chamber)
1044  sql_tube += " AND CHAMBER=%s" % chamber_id
1045  chamber_tube_sql = " AND tube_id IN (SELECT tube_id FROM %s.MDT_TUBE WHERE chamber=%s)" % (self.dschema,chamber_id)
1046  sql_tube_v += chamber_tube_sql
1047  sql_tube_c += chamber_tube_sql
1048  elif tube != None and chamber == None:
1049  tube_id = self.get_tube_id(tube)
1050  sql_tube_v += " AND TUBE_ID=%s" % tube_id
1051  sql_tube_c += " AND TUBE_ID=%s" % tube_id
1052  sql_tube += " AND TUBE_ID=%s" % tube_id
1053 
1054  self.dbgout("SQL from delete_tube: %s" % sql_tube_v)
1055  self.dbgout("SQL from delete_tube: %s" % sql_tube_c)
1056  self.dbgout("SQL from delete_tube: %s" % sql_tube)
1057 
1058  try:
1059  self.exec_delete(sql_tube_v)
1060  self.exec_delete(sql_tube_c)
1061  self.exec_delete(sql_tube)
1062  except Exception as exc:
1063  raise DeleteError(self,exc)
1064 
1065  def set_rt_valid(self,tube=None):
1066  self.connect("write","data")
1067  sql_valid = "UPDATE %s.MDT_RT SET VALIDFLAG=3 WHERE HEAD_ID=%s " % (self.dschema, self.head_id)
1068  if tube != None:
1069  sql_valid += "AND REGION_ID=%s" % tube
1070 
1071  self.dbgout("SQL from set_rt_valid: %s" % sql_valid)
1072 
1073  try:
1074  self.exec_insert(sql_valid)
1075  except Exception as exc:
1076  raise UpdateError(self,exc)
1077 
1078  def set_t0_valid(self,tube=None):
1079  self.connect("write","data")
1080  sql_valid = "UPDATE %s.MDT_TUBE SET VALIDFLAG=3 WHERE HEAD_ID=%s " % (self.dschema, self.head_id)
1081  if tube != None:
1082  sql_valid += "AND TUBE_ID=%s" % tube
1083 
1084  self.dbgout("SQL from set_t0_valid: %s" % sql_valid)
1085 
1086  try:
1087  self.exec_insert(sql_valid)
1088  except Exception as exc:
1089  raise UpdateError(self,exc)
1090 
1091  def fetch_head_data(self):
1092  self.connect()
1093  sql = "SELECT HEAD_ID,IMPLEMENTATION,LOWRUN,UPRUN,LOWTIME,UPTIME,LUMINOSITY,SITE_NAME,INSERT_TIME FROM %s.MDT_HEAD ORDER BY HEAD_ID DESC" % (self.mschema)
1094 
1095  self.dbgout("SQL from fetch_head_data: %s" % sql)
1096 
1097  try:
1098  self.mcursor.execute(sql)
1099  except Exception as exc:
1100  raise QueryError(self,exc)
1101  #"%-8s\t%-30s\t%-8s\t%-8s\t%-10s\t%-10s\t%-2s\t%-2s\t%-28s\n"
1102  #"%-8s%-30s%-8s%-8s%-10s%-10s%-2s%-2s%-28s\n"
1103  head_row = self.mcursor.fetchone()
1104  head_data_rows = []
1105  while head_row:
1106  head_data_rows.append(head_row)
1107  head_row = self.mcursor.fetchone()
1108  return head_data_rows
1109 
1110  def format_head_data(self,head_data_rows):
1111  text = "%-10s%-35s%-10s%-10s%-12s%-12s%-5s%-6s%-30s\n" % ('HEAD_ID','IMPLEMENTATION','LOWRUN','UPRUN','LOWTIME','UPTIME','LUM','SITE','INSERT TIME')
1112  for data in head_data_rows:
1113  text += "%-10s%-35s%-10s%-10s%-12s%-12s%-5s%-6s%-30s\n" % (data[0],data[1],data[2],data[3],data[4],data[5],data[6],data[7],data[8])
1114  return text
1115 
1116 
1117 
1118 
1119 
1120 
1121 
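# Illustrative usage sketch (not part of the original source; assumes a working
# CalibDbInfo configuration and database access):
#
#   from CalibDataClass import CalibData
#
#   cd = CalibData(head_id="1234@mydb")   # "mydb" must be a key in CalibDbInfo.databases
#   cd.debug = True
#   cd.get_head_info()                    # fills IOV, schema and writer info for head_id 1234
#   print(cd.format_headinfo())
#   print(cd.format_head_data(cd.fetch_head_data()))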