8 __author__ =
"Sebastien Binet <binet@cern.ch>"
25 from dbm
import whichdb
27 from .Helpers
import ShutUp
36 """ reverse-engineering of the POOL FileCatalog.
37 allows to retrieve the physical filename from a logical one, provided
38 that the file-id is known to the (real) PoolFileCatalog
40 DefaultCatalog =
"xmlcatalog_file:PoolFileCatalog.xml"
51 super (PoolFileCatalog, self).
__init__()
58 if isinstance(catalog, str):
61 if not isinstance (catalog, (str, list)):
63 "catalog contact string should be a string or a list thereof! (got %r)"%
68 return osp.expanduser(osp.expandvars(x))
70 def _handle_apcfile_old(x):
71 """ return $ATLAS_POOLCOND_PATH/poolcond/x
73 if 'ATLAS_POOLCOND_PATH' not in os.environ:
75 pcp = os.environ[
"ATLAS_POOLCOND_PATH"]
76 if x.startswith(
"apcfile:"):
77 x = x[len(
"apcfile:"):]
78 return osp_exp(osp.join(pcp,
'poolcond', x))
80 def _handle_apcfile(x):
81 """ return $ATLAS_POOLCOND_PATH/x
83 if 'ATLAS_POOLCOND_PATH' not in os.environ:
85 pcp = os.environ[
"ATLAS_POOLCOND_PATH"]
86 if x.startswith(
"apcfile:"):
87 x = x[len(
"apcfile:"):]
88 return osp_exp(osp.join(pcp, x))
def _handle_xmlcatalog_file(x):
    # drop the "xmlcatalog_file:" protocol prefix, then expand ~ and $VARS
    tail = x[len("xmlcatalog_file:"):]
    return osp_exp(tail)
93 def _handle_prfile(x):
94 x = x[len(
"prfile:"):]
97 import AthenaCommon.Utils.unixtools
as u
99 os.environ[
'DATAPATH'].
split(os.pathsep),
110 "xmlcatalog_file:": _handle_xmlcatalog_file,
111 "apcfile:": _handle_apcfile,
112 "prfile:": _handle_prfile,
113 "file:": _handle_file,
116 "catalog dispatch keys does not match AllowedProtocols:" \
117 "\n%s\n%s" % (
sorted(cat_dispatch.keys()),
120 from .
import xmldict
121 def _build_catalog(catalog):
124 "sorry PoolFile:PoolFileCatalog only supports %s"
125 " as a protocol for the POOL file catalog (got: '%s')"
128 for protocol, handler
in cat_dispatch.iteritems():
129 if catalog.startswith(protocol):
135 if not os.path.exists (catalog):
142 root = xmldict.ElementTree.parse (catalog).getroot()
143 return dict(xmldict.xml2dict(root))
146 cat = {
'POOLFILECATALOG':{
'File':[]}}
149 bc = _build_catalog(c)
150 pc = bc.get(
'POOLFILECATALOG',{})
153 files = pc.get(
'File',[])
154 if isinstance(files, dict):
156 cat[
'POOLFILECATALOG'][
'File'].extend(files)
157 except Exception
as err:
def pfn (self, url_or_fid):
    """find the physical file name given a url or a file-id

    `url_or_fid` may be a single string or a list thereof; a list is
    resolved element-wise and a list of physical names is returned.
    """
    # handle lists first: os.path.expanduser/expandvars only accept a
    # single path string, so each element must be expanded on its own.
    # (the original used the python2-only `types.ListType`, which no
    # longer exists in python3; `list` is the direct replacement.)
    if isinstance (url_or_fid, list):
        return [self.pfn(f) for f in url_or_fid]
    import os.path as osp
    url_or_fid = osp.expanduser(osp.expandvars(url_or_fid))
    return self._pfn(url_or_fid)
177 """find the physical file name given a url or a file-id"""
178 if not (
'POOLFILECATALOG' in self.
catalog):
180 if not (
'File' in self.
catalog[
'POOLFILECATALOG']):
185 files = self.
catalog[
'POOLFILECATALOG'][
'File']
186 if isinstance(files, dict):
190 if url_or_fid.lower().startswith(
'fid:'):
191 url_or_fid = url_or_fid[len(
'fid:'):]
192 if re.compile (
r'\w{8}-\w{4}-\w{4}-\w{4}-\w{12}$').match (url_or_fid):
193 fid = url_or_fid.lower()
198 if f.ID.lower() == fid:
201 if isinstance(pfn, (list,tuple)):
202 match[fid].
append([i.name
for i
in pfn])
204 match[fid].
append([pfn.name])
205 if len(match[fid])==1:
206 return match[fid][0][PFN_IDX]
207 if len(match[fid])>1:
209 "more than one match for FID='%s'!\n%r"%(fid,match)
211 raise KeyError (
"no entry with FID='%s' in catalog" % fid)
214 if url.lower().startswith(
"lfn:"):
215 url = url[len(
"lfn:"):]
221 and f.logical.lfn.name == url):
224 if isinstance(pfn, (list,tuple)):
225 match[url].
append([i.name
for i
in pfn])
227 match[url].
append([pfn.name])
228 if len(match[url])==1:
229 return match[url][0][PFN_IDX]
230 if len(match[url])>1:
232 "more than one match for LFN='%s'!\n%r"%(url,match)
234 raise KeyError (
"no entry with LFN='%s' in catalog" % url)
236 if url.lower().startswith(
"pfn:"):
237 url = url[len(
"pfn:"):]
241 return self.
pfn (url_or_fid)
251 EventData =
"CollectionTree"
252 EventTag =
"POOLCollectionTree"
253 DataHeader =
"POOLContainer"
254 MetaData =
"MetaData"
256 EventData =
"EventData"
257 EventTag =
"EventTag"
258 DataHeader =
"DataHeader"
259 MetaData =
"MetaData"
262 SUPER_DETAILED_BRANCH_SZ =
False
264 POOL_HEADER = TTreeNames.DataHeader
265 EVENT_DATA = TTreeNames.EventData
266 META_DATA = TTreeNames.MetaData
267 HDR_FORMAT =
" %11s %11s %11s %11s %5s %s"
268 ROW_FORMAT =
"%12.3f kb %12.3f kb %12.3f kb %12.3f %8i %s"
272 return not name.startswith(
"##")
and not cls.
isDataHeader(name)
282 return name.startswith(PoolOpts.EVENT_DATA)
286 return "_DAOD_" in name
290 s = (name+
"__").
split(
'_')[2]
291 if s.endswith(
"Form"):
297 return name.startswith(PoolOpts.POOL_HEADER)
and cls.
isAugmentation(name)
302 if PoolOpts.FAST_MODE:
304 if not PoolOpts.SUPER_DETAILED_BRANCH_SZ:
305 return branch.GetTotalSize()
308 for bnum
in range(0, branch.GetWriteBasket()):
309 basket = branch.GetBasket(bnum)
310 brSize += basket.GetObjlen() - 8
314 """take a file name, return the pair (protocol, 'real' file name)
316 fname = os.path.expanduser(os.path.expandvars(fname))
318 def _normalize_uri(uri):
319 if uri.startswith(
'/'):
323 from urllib.parse
import urlsplit
324 url = urlsplit(_normalize_uri(fname))
325 protocol = url.scheme
326 def _normalize(fname):
327 from posixpath
import normpath
328 fname = normpath(fname)
329 if fname.startswith(
'//'): fname = fname[1:]
332 if protocol
in (
'',
'file',
'pfn'):
334 fname = _normalize(url.path)
337 if fname.startswith(
'/castor/'):
339 fname = protocol +
':' + fname
341 elif protocol
in (
'rfio',
'castor'):
343 fname = _normalize(url.path)
344 fname = protocol+
':'+fname
346 elif protocol
in (
'root',
'dcap',
'dcache',
'http',
'https',
'dav',
'davs'):
349 elif protocol
in (
'gsidcap',):
350 protocol =
'gfal:gsidcap'
353 elif protocol
in (
'lfn',
'fid',):
355 from PyUtils.PoolFile
import PoolFileCatalog
as pfc
356 fname = pfc().pfn(protocol+
':'+url.path)
359 elif protocol
in (
'ami',):
361 for token
in (
'ami:',
'//',
'/'):
362 if fname.startswith(token):
363 fname = fname[len(token):]
364 fname =
'ami://' + fname
368 print(f
'## warning: unknown protocol [{protocol}]. we will just return our input')
371 return (protocol, fname)
374 x509_proxy = os.environ.get(
'X509_USER_PROXY',
'')
377 root.TSSLSocket.SetUpSSL(
379 "/etc/grid-security/certificates",
383 print(
"## warning: protocol https is requested but no X509_USER_PROXY was found! (opening the file might fail.)")
388 import PyUtils.RootUtils
as ru
389 root = ru.import_root()
393 re.compile(
'TClass::TClass:0: RuntimeWarning: no dictionary for class.*') ]):
394 root.gSystem.Load(
'libRootCollection')
395 root_open = root.TFile.Open
400 if protocol ==
'https':
402 root_open = root.TWebFile.Open
404 f = root_open(fname,
'READ')
405 if f
is None or not f:
407 raise IOError(errno.ENOENT,
408 'No such file or directory',fname)
413 fmt =
"%s %3i %8.3f %8.3f %8.3f %s"
416 branch.GetListOfBranches().GetSize(),
417 _get_total_size (branch),
418 branch.GetTotBytes(),
419 branch.GetZipBytes(),
423 branches = branch.GetListOfBranches()
425 poolRecord.memSize += _get_total_size (b) / Units.kb
426 if (b.GetZipBytes() < 0.001):
427 poolRecord.memSizeNoZip += _get_total_size (b) / Units.kb
428 poolRecord.diskSize += b.GetZipBytes() / Units.kb
429 poolRecord = retrieveBranchInfos ( b, poolRecord, ident+
" " )
434 memSize = _get_total_size (branch) / Units.kb
435 zipBytes = branch.GetZipBytes()
436 memSizeNoZip = memSize
if zipBytes < 0.001
else 0.
437 diskSize = branch.GetZipBytes() / Units.kb
438 typeName = branch.GetClassName()
439 if not typeName
and (leaf := branch.GetListOfLeaves().At(0)):
440 typeName = leaf.GetTypeName()
441 return PoolRecord(branch.GetName(), memSize, diskSize, memSizeNoZip,
447 """Helper function to read a POOL file and extract the item-list from the
450 `pool_file` the name of the pool file to inspect
451 `verbose` self-explanatory
452 `items_type` what kind of items one is interested in
453 allowed values: 'eventdata' 'metadata'
454 Note: this function is actually executed in a forked sub-process
457 _allowed_values = (
'eventdata',
459 if items_type
not in _allowed_values:
461 "invalid argument for 'items_type'. ",
462 "got: [%s] " % items_type,
463 "(allowed values: %r)" % _allowed_values
465 raise ValueError(err)
467 key =
'%s_items' % items_type
469 import PyUtils.FilePeekerTool
as fpt
470 fp = fpt.FilePeekerTool(f_root)
471 items = fp.getPeekedData(key)
481 DiskSize =
"diskSize"
483 ContainerName =
"name"
487 return [ PoolRecord.Sorter.DiskSize,
488 PoolRecord.Sorter.MemSize,
489 PoolRecord.Sorter.ContainerName ]
491 def __init__(self, name, memSize, diskSize, memSizeNoZip, nEntries, dirType,
492 detailedInfos = "", typeName = None):
493 """Initialize PoolRecord instance.
495 dirType first letter of object type name that may distinguish the types:
496 "T" for TTree, "B" for TBranch,
497 "N" for RNTuple, "F" for RField
499 object.__init__(self)
513 A simple class to retrieve informations about the content of a POOL file.
514 It should be abstracted from the underlying technology used to create this
515 POOL file (Db, ROOT,...).
516 Right now, we are using the easy and loosy solution: going straight to the
521 object.__init__(self)
536 except Exception
as err:
537 print(
"## warning: problem opening PoolFileCatalog:\n%s"%err)
539 traceback.print_exc(err)
543 dbFileName = whichdb( fileName )
544 if dbFileName
not in (
None,
'' ):
546 print(
"## opening file [%s]..." %
str(fileName))
547 db = shelve.open( fileName,
'r' )
549 print(
"## opening file [OK]")
550 report = db[
'report']
553 self.
data = report[
'data']
556 print(
"## opening file [%s]..." %
str(fileName))
559 print(
"## opening file [OK]")
568 print(
"## importing ROOT...")
569 import PyUtils.RootUtils
as ru
570 ROOT = ru.import_root()
573 print(
"## importing ROOT... [DONE]")
577 ROOT.gErrorIgnoreLevel = ROOT.kFatal
581 poolFile = ROOT.TFile.Open( fileName, PoolOpts.READ_MODE )
582 except Exception
as e:
584 print(
"## Failed to open file [%s] !!" % fileName)
587 print(
"## Bailing out...")
588 raise IOError(
"Could not open file [%s]" % fileName)
593 print(
"## Failed to open file [%s] !!" % fileName)
594 msg =
"Could not open file [%s]" % fileName
599 "Invalid POOL file or a Zombie one"
608 for name
in {PoolOpts.TTreeNames.DataHeader, PoolOpts.RNTupleNames.DataHeader}:
609 dhKey = self.
poolFile.FindKey( name )
612 if isinstance(obj, self.
ROOT.TTree):
613 nEntries = obj.GetEntries()
614 elif isinstance(obj, self.
ROOT.Experimental.RNTuple):
615 nEntries = self.
ROOT.Experimental.RNTupleReader.Open(obj).GetNEntries()
617 raise NotImplementedError(f
"Keys of type {type(obj)!r} not supported")
624 for k
in self.
poolFile.GetListOfKeys():
625 keyname = k.GetName()
627 if isinstance(obj, self.
ROOT.TTree):
628 containerName = obj.GetName()
629 nEntries = obj.GetEntries()
631 elif isinstance(obj, self.
ROOT.Experimental.RNTuple):
632 reader = self.
ROOT.Experimental.RNTupleReader.Open(obj)
633 containerName = reader.GetDescriptor().GetName()
634 nEntries = reader.GetNEntries()
637 raise NotImplementedError(f
"Keys of type {type(obj)!r} not supported")
638 if containerName
not in containers:
640 containers.append(containerName)
642 if keyname.startswith(PoolOpts.POOL_HEADER)
and not keyname.endswith(
'Form'):
643 self.
dataHeaderA[PoolOpts.augmentationName(keyname)] = \
648 keys.sort (key =
lambda x: x.GetName())
654 if isinstance(obj, self.
ROOT.TTree):
656 elif isinstance(obj, self.
ROOT.Experimental.RNTuple):
657 reader = self.
ROOT.Experimental.RNTupleReader.Open(obj)
658 name = reader.GetDescriptor().GetName()
660 if PoolOpts.isDataHeader(name):
661 contName =
"DataHeader"
662 if isinstance(obj, self.
ROOT.TTree):
663 memSize = obj.GetTotBytes() / Units.kb
664 diskSize = obj.GetZipBytes() / Units.kb
667 memSizeNoZip = memSize
668 nEntries = obj.GetEntries()
672 br.GetName()
for br
in obj.GetListOfBranches()
673 if br.GetName().
count(
"DataHeader_p") > 0
675 if len(dhBranchNames) == 1:
676 dhBranch = obj.GetBranch(dhBranchNames[0])
677 typeName = dhBranch.GetClassName()
678 if not typeName
and (leaf := dhBranch.GetListOfLeaves().At(0)):
679 typeName = leaf.GetTypeName()
685 typeName = typeName ),
689 poolRecord =
PoolRecord(contName, memSize, diskSize, memSizeNoZip,
694 elif isinstance(obj, self.
ROOT.Experimental.RNTuple):
695 reader = self.
ROOT.Experimental.RNTupleReader.Open(obj)
696 inspector = self.
ROOT.Experimental.RNTupleInspector.Create(obj)
697 diskSize = inspector.GetCompressedSize() / Units.kb
698 memSize = inspector.GetUncompressedSize() / Units.kb
702 memSizeNoZip = memSize
703 nEntries = reader.GetNEntries()
704 poolRecord =
PoolRecord(contName, memSize, diskSize, memSizeNoZip,
708 elif PoolOpts.isData(name):
709 if isinstance(obj, self.
ROOT.TTree):
710 if not hasattr(obj,
'GetListOfBranches'):
712 branches = obj.GetListOfBranches()
714 if name
in (PoolOpts.EVENT_DATA, PoolOpts.META_DATA):
716 for branch
in branches:
722 poolRecord.augName = PoolOpts.augmentationName(name)
724 self.
data += [ poolRecord ]
725 elif isinstance(obj, self.
ROOT.Experimental.RNTuple):
726 reader = self.
ROOT.Experimental.RNTupleReader.Open(obj)
727 descriptor = reader.GetDescriptor()
728 inspector = self.
ROOT.Experimental.RNTupleInspector.Create(obj)
730 if name
in {PoolOpts.RNTupleNames.EventData, PoolOpts.RNTupleNames.MetaData}:
732 fieldZeroId = descriptor.GetFieldZeroId()
733 for fieldDescriptor
in descriptor.GetFieldIterable(fieldZeroId):
734 fieldId = fieldDescriptor.GetId()
735 fieldTreeInspector = inspector.GetFieldTreeInspector(fieldId)
736 diskSize = fieldTreeInspector.GetCompressedSize() / Units.kb
737 memSize = fieldTreeInspector.GetUncompressedSize() / Units.kb
738 fieldDescriptor = fieldTreeInspector.GetDescriptor()
739 typeName = fieldDescriptor.GetTypeName()
740 fieldName = fieldDescriptor.GetFieldName()
741 poolRecord =
PoolRecord(fieldName, memSize, diskSize, memSize,
742 descriptor.GetNEntries(),
745 poolRecord.augName = PoolOpts.augmentationName(name)
747 self.
data += [ poolRecord ]
753 return os.linesep.join( [
755 "Size: %12.3f kb" % (self.
_fileInfos[
'size'] / Units.kb),
760 def checkFile(self, sorting = PoolRecord.Sorter.DiskSize):
770 if sorting
in PoolRecord.Sorter.allowedValues():
772 data.sort(key = operator.attrgetter(sorting) )
774 def _get_val(x, dflt=-999.):
775 if PoolOpts.FAST_MODE:
779 totMemSize = _get_val(self.
dataHeader.memSize, dflt=0.)
782 def _safe_div(num,den):
790 print(PoolOpts.HDR_FORMAT % (
"Mem Size",
"Disk Size",
"Size/Evt",
791 "MissZip/Mem",
"items",
792 "(X) Container Name (X=Tree|Branch)" ))
795 print(PoolOpts.ROW_FORMAT % (
799 _get_val (_safe_div(self.
dataHeader.memSizeNoZip,
809 totMemSize += 0.
if PoolOpts.FAST_MODE
else d.memSize
810 totDiskSize += d.diskSize
811 memSizeNoZip = d.memSizeNoZip/d.memSize
if d.memSize != 0.
else 0.
813 totMemSizeA[aug] = totMemSizeA.get(aug,0.) + d.memSize
814 totDiskSizeA[aug] = totDiskSizeA.get(aug,0.) + d.diskSize
816 print(PoolOpts.ROW_FORMAT % (
817 _get_val (d.memSize),
820 _get_val (memSizeNoZip),
822 "("+d.dirType+
") "+d.name
830 print(PoolOpts.ROW_FORMAT % (
831 totMemSizeA[a], totDiskSizeA[a],
835 "Aug Stream: " + (
'MAIN' if a==
'' else a)
838 print(PoolOpts.ROW_FORMAT % (
839 totMemSize, totDiskSize,
842 "TOTAL (POOL containers)"
845 if PoolOpts.FAST_MODE:
846 print(
"::: warning: FAST_MODE was enabled: some columns' content ",)
847 print(
"is meaningless...")
853 print(
"Can't perform a detailedDump with a shelve file as input !")
856 if bufferName == sys.stdout.name:
857 bufferName =
"/dev/stdout"
858 out =
open( bufferName,
"w" )
860 save_stdout_fileno = os.dup (sys.stdout.fileno())
861 os.dup2( out.fileno(), sys.stdout.fileno() )
863 out.write(
"#" * 80 + os.linesep )
864 out.write(
"## detailed dump" + os.linesep )
867 for key
in self.
keys:
869 name = tree.GetName()
871 if PoolOpts.isDataHeader(name)
or \
872 PoolOpts.isData(name):
874 print (
"=== [%s] ===" % name, file=sys.stderr)
876 except Exception
as err:
877 print (
"Caught:",err, file=sys.stderr)
878 print (sys.exc_info()[0], file=sys.stderr)
879 print (sys.exc_info()[1], file=sys.stderr)
883 out.write(
"#" * 80 + os.linesep )
885 out.write(
"#" * 80 + os.linesep )
891 if bufferName !=
"<stdout>":
894 sys.stdout = open (save_stdout_fileno,
'a')
899 Return a PoolRecord according to its (branch) name
900 Raise KeyError if no match is found
902 for data
in self.data:
903 if data.name == name:
905 raise KeyError(
"No PoolRecord with name [%s]" % name)
909 Save all the gathered informations into a python shelve or a CSV file
910 (depending on the @param `fileName` extension)
913 if os.path.splitext(fileName)[-1] ==
'.csv':
914 return self._save_csv_report (fileName)
915 return self._save_shelve_report (fileName)
919 Save all the gathered informations into a python shelve
920 Data can then be read like so:
922 >>> db = shelve.open( 'myfile.dat', 'r' )
923 >>> report = db['report']
924 >>> print ('fileSize:',report['fileSize'])
925 >>> print ('dataHeader/memSize:',report['dataHeader'].memSize)
926 >>> for d in report['data']:
927 ... print ('data:',d.name,d.nEntries,d.memSize)
930 if os.path.exists (fileName):
932 db = shelve.open (fileName)
934 'fileInfos' : self._fileInfos,
935 'nbrEvts' : self.dataHeader.nEntries,
936 'dataHeader' : self.dataHeader,
944 Save all the gathered informations into a CSV file
947 if os.path.exists (fileName):
949 args = {
'newline' :
''}
950 f = open (fileName,
'w', **args)
952 o.writerow ([
'file name', self._fileInfos[
'name']])
953 o.writerow ([
'file size', self._fileInfos[
'size']])
954 o.writerow ([
'nbr evts', self.dataHeader.nEntries])
955 o.writerow ([
'mem size',
'disk size',
'mem size nozip',
'items',
956 'container name',
'branch type'])
959 o.writerow ([d.memSize, d.diskSize, d.memSizeNoZip,
960 d.nEntries, d.name, d.dirType])
965 if self.poolFile
and hasattr(self.poolFile,
'Close'):
967 self.poolFile.Close()
969 except Exception
as err:
970 print(
"WARNING:",err)
977 A helper class to compare 2 POOL files and check that they match, both in
978 terms of containers' content and containers' sizes
981 def __init__(self, refFileName, chkFileName, verbose = False, ignoreList = None, strict = False):
982 object.__init__(self)
986 refFileName = os.path.expandvars( os.path.expanduser( refFileName ) )
987 chkFileName = os.path.expandvars( os.path.expanduser( chkFileName ) )
989 if ignoreList
is None:
996 except Exception
as err:
997 print(
"## Caught exception [%s] !!" %
str(err.__class__))
998 print(
"## What:",err)
999 print(sys.exc_info()[0])
1000 print(sys.exc_info()[1])
1001 err =
"Error while opening POOL files !"
1002 err +=
" chk : %s%s" % ( chkFileName, os.linesep )
1003 err +=
" ref : %s%s" % ( refFileName, os.linesep )
1004 raise Exception(err)
1016 "::: Comparing POOL files...",
1017 " ref : %s" % self.
refFile._fileInfos[
'name'],
1018 " chk : %s" % self.
chkFile._fileInfos[
'name'],
1022 if self.
chkFile.dataHeader.nEntries != \
1023 self.
refFile.dataHeader.nEntries :
1025 "## WARNING: files don't have the same number of entries !!",
1026 " ref : %r" % self.
refFile.dataHeader.nEntries,
1027 " chk : %r" % self.
chkFile.dataHeader.nEntries,
1033 if chkNames != refNames:
1035 "## ERROR: files don't have the same content !!",
1037 addNames = [ n
for n
in chkNames
if n
not in refNames ]
1038 if len( addNames ) > 0:
1039 self.
summary += [
"## collections in 'chk' and not in 'ref'" ]
1041 self.
summary += [
" + %s" % n ]
1042 subNames = [ n
for n
in refNames
if n
not in chkNames ]
1043 if len( subNames ) > 0:
1044 self.
summary += [
"## collections in 'ref' and not in 'chk'" ]
1046 self.
summary += [
" - %s" % n ]
1051 self.
summary += [
"## Ignoring the following:" ]
1055 commonContent = [ d
for d
in chkNames
if (d
in refNames
and d
not in self.
ignList)]
1059 self.
summary += [
"::: comparing common content (mem-size / disk-size)..." ]
1061 for name
in commonContent:
1067 if chkMemSize != refMemSize
or (self.
strict and chkDiskSize != refDiskSize):
1069 "[ERR] %12.3f / %12.3f kb (ref) ==> %12.3f / %12.3f kb (chk) | %s" % \
1070 ( refMemSize,refDiskSize,chkMemSize,chkDiskSize, name )
1075 " [OK] %12.3f/%12.3f kb | %s" % \
1076 ( chkMemSize, chkDiskSize, name )
1083 else: self.
summary += [
"## Comparison : [ERR]" ]
1093 out.writelines( i + os.linesep )
1099 A counter just contains an item list (pairs class-name/sg-key) and the size
1103 object.__init__(self)