// FlexErrArrayGroup.icc -- ATLAS Offline Software.
1 /*
2  Copyright (C) 2002-2021 CERN for the benefit of the ATLAS collaboration
3 */
4 
5 
6 //In principle any negative number should be fine as "magic"
7 //value. However, just to be sure, we pick something a bit odd:
8 #define ERRCHUNK_NOTSET_MAGICVALUE (-12345.6789)
9 
10 //____________________________________________________________________
11 template <class T>
12 inline FlexErrArrayGroup<T>::FlexErrArrayGroup(unsigned nbins)
13  : m_indices(0),
14  m_nbins(nbins),
15  m_chunksallocated(0),
16  m_sumw2allocated(0),
17  m_fastloop_nextbin2check(0)
18 {
19  assert(nbins<=FLEXERRARRAYGROUP_MAXBINS);
20 
21 #ifndef NDEBUG
22  //Stress-test our bit handling:
23  for (int i=0;i<FLEXERRARRAYGROUP_NCHUNKS;++i) {
24  assert(LWHistBitUtils::bitIsSet<uint8_t>(0xFF,i));
25  assert(!LWHistBitUtils::bitIsSet<uint8_t>(0x0,i));
26  uint8_t data1(0), data2(0x01);
27  LWHistBitUtils::setBit<uint8_t>(data1,i); LWHistBitUtils::setBit<uint8_t>(data2,i);
28  assert(LWHistBitUtils::bitIsSet<uint8_t>(data1,i));
29  assert(LWHistBitUtils::bitIsSet<uint8_t>(data2,i));
30  assert(LWHistBitUtils::bitIsSet<uint8_t>(data2,0));
31  }
32  for (int i=0;i<4;++i) {
33  assert((i<4)==LWHistBitUtils::bitIsSet<uint8_t>(0x0F,i));
34  assert((i<4)!=LWHistBitUtils::bitIsSet<uint8_t>(0xF0,i));
35  }
36  for (int i=0;i<FLEXERRARRAYGROUP_NCHUNKS;++i) {
37  assert(LWHistBitUtils::countSetBitsBefore<uint8_t>(0xFF,i)==i);
38  assert(LWHistBitUtils::countSetBitsBefore<uint8_t>(0x0F,i)==(i<4?i:4));
39  assert(LWHistBitUtils::countSetBitsBefore<uint8_t>(0xF0,i)==(i<4?0:i-4));
40  }
41 #endif
42 }
43 
//____________________________________________________________________
template <class T>
inline FlexErrArrayGroup<T>::~FlexErrArrayGroup()
{
  //Release every allocated bin-chunk and sumw2 array, then the index
  //array itself. All memory goes back to the LWPools allocators.
  unsigned nindices(0);//running total of entries in m_indices
  unsigned j(0);
  const unsigned n(nIndicesUsedByChunks());
  //The first nIndicesUsedByChunks() entries are FlexBinChunk pointers:
  for(unsigned i=0;i<n;++i)
    MP_DELETE((reinterpret_cast<FlexBinChunk<T>*>(m_indices[j++])));
  nindices += n;

  if (m_sumw2allocated!=0) {
    //Remaining entries are sumw2 (double[FLEXBINCHUNK_NBINS]) arrays.
    //NB: j and n here deliberately shadow the outer variables.
    unsigned j(nindices);
    const unsigned n(nIndicesUsedByErrors());
    for(unsigned i=0;i<n;++i)
      LWPools::release<double,FLEXBINCHUNK_NBINS>(reinterpret_cast<double*>(m_indices[j++]));
    nindices += n;
  }
  if (m_indices)
    LWPools::release(m_indices,nindices);
}
65 
66 //____________________________________________________________________
67 template <class T>
68 inline void FlexErrArrayGroup<T>::addIndexPointer(unsigned position, void* newval)
69 {
70  if (!m_indices) {
71  m_indices = LWPools::acquire<void*,1>();
72  m_indices[0] = newval;
73  return;
74  }
75  const unsigned nOld(nIndicesUsedByChunks()+nIndicesUsedByErrors());
76  void ** newindices = LWPools::acquire<void*>(nOld+1);
77  unsigned j(0);
78  for (unsigned i = 0; i<position;++i)
79  newindices[j++] = m_indices[i];
80  newindices[j++] = newval;
81  for (unsigned i = position; i<nOld;++i)
82  newindices[j++] = m_indices[i];
83  LWPools::release(m_indices,nOld);
84  m_indices = newindices;
85 }
86 
87 //____________________________________________________________________
88 template <class T>
89 inline const FlexBinChunk<T>* FlexErrArrayGroup<T>::getChunkNoAlloc(unsigned igroup) const
90 {
91  //returns null if not allocated:
92  if (!LWHistBitUtils::bitIsSet<uint8_t>(m_chunksallocated,igroup))
93  return 0;
94  else
95  return reinterpret_cast<FlexBinChunk<T>*>(m_indices[LWHistBitUtils::countSetBitsBefore<uint8_t>(m_chunksallocated,igroup)]);
96 }
97 
98 //____________________________________________________________________
99 template <class T>
100 inline FlexBinChunk<T>* FlexErrArrayGroup<T>::getChunkNoAlloc(unsigned igroup)
101 {
102  //returns null if not allocated:
103  if (!LWHistBitUtils::bitIsSet<uint8_t>(m_chunksallocated,igroup))
104  return 0;
105  else
106  return reinterpret_cast<FlexBinChunk<T>*>(m_indices[LWHistBitUtils::countSetBitsBefore<uint8_t>(m_chunksallocated,igroup)]);
107 }
108 
109 //____________________________________________________________________
110 template <class T>
111 inline FlexBinChunk<T>* FlexErrArrayGroup<T>::getChunk(unsigned igroup)
112 {
113  if (!LWHistBitUtils::bitIsSet<uint8_t>(m_chunksallocated,igroup)) {
114  //Grow index array and update status bit:
115  FlexBinChunk<T> * chunk = MP_NEW(FlexBinChunk<T>)();
116  addIndexPointer(LWHistBitUtils::countSetBitsBefore<uint8_t>(m_chunksallocated,igroup),chunk);
117  LWHistBitUtils::setBit<uint8_t>(m_chunksallocated,igroup);
118  // cppcheck-suppress assertWithSideEffect
119  assert(getChunk(igroup)==chunk);
120  // cppcheck-suppress assertWithSideEffect
121  assert(getChunkNoAlloc(igroup)==chunk);
122  return chunk;
123  } else {
124  return reinterpret_cast<FlexBinChunk<T>*>(m_indices[LWHistBitUtils::countSetBitsBefore<uint8_t>(m_chunksallocated,igroup)]);
125  }
126 }
127 
//____________________________________________________________________
template <class T>
inline double* FlexErrArrayGroup<T>::getErrChunk(unsigned igroup)
{
  //Return the sumw2 array for igroup, allocating and initialising it
  //on first use.
  //Sumw2 arrays are stored in m_indices after all bin-chunk pointers:
  const unsigned nbefore = nIndicesUsedByChunks()+LWHistBitUtils::countSetBitsBefore<uint8_t>(m_sumw2allocated,igroup);
  if (m_sumw2allocated&&LWHistBitUtils::bitIsSet<uint8_t>(m_sumw2allocated,igroup))
    return reinterpret_cast<double*>(m_indices[nbefore]);

  double * errchunk = LWPools::acquire<double,FLEXBINCHUNK_NBINS>();
  //Provide proper start values (= magic values indicating not used):
  for (unsigned i=0;i<FLEXBINCHUNK_NBINS;++i)
    errchunk[i] = ERRCHUNK_NOTSET_MAGICVALUE;
  addIndexPointer(nbefore,errchunk);
  LWHistBitUtils::setBit<uint8_t>(m_sumw2allocated,igroup);
  return errchunk;
}
144 
145 //____________________________________________________________________
146 template <class T>
147 inline const double* FlexErrArrayGroup<T>::getErrChunkNoAlloc(unsigned igroup) const
148 {
149  if (m_sumw2allocated&&LWHistBitUtils::bitIsSet<uint8_t>(m_sumw2allocated,igroup))
150  return reinterpret_cast<double*>
151  (m_indices[nIndicesUsedByChunks()+LWHistBitUtils::countSetBitsBefore<uint8_t>(m_sumw2allocated,igroup)]);
152  return 0;
153 }
154 
155 
//____________________________________________________________________
template <class T>
inline void FlexErrArrayGroup<T>::fill( unsigned bin
                                        STRICT_ROOT_PAR(bool pretendSumWMode) )
{
  //Increment the content of "bin" by one, keeping any existing sumw2
  //bookkeeping consistent.
  assert(bin<m_nbins);
  getChunk(getGroupIndex(bin))->fill(getChunkBin(bin));

#ifdef LW_STRICT_ROOT_BEHAVIOUR
  if (pretendSumWMode) {
    //Ensure a sumw2 entry exists and add 1*1 to it:
    double * errchunk = getErrChunk(getGroupIndex(bin));
    double& err = errchunk[getChunkBin(bin)];
    if (err!=ERRCHUNK_NOTSET_MAGICVALUE)
      ++err;
    else
      err = std::abs(getBinContent(bin));//first real value derived from content
  }
  return;
#else
  if (m_sumw2allocated) {
    const unsigned igroup(getGroupIndex(bin));
    if (LWHistBitUtils::bitIsSet<uint8_t>(m_sumw2allocated,igroup)) {
      double * errchunk = getErrChunk(igroup);
      assert(errchunk);
      double& err = errchunk[getChunkBin(bin)];
      //Entries still at the magic value implicitly track the bin content,
      //so a weight-1 fill needs no explicit update for them:
      if (err!=ERRCHUNK_NOTSET_MAGICVALUE)
        ++err;
    }
  }
#endif
}
187 
//____________________________________________________________________
template <class T>
inline void FlexErrArrayGroup<T>::fill( unsigned bin,
                                        const double& weight
                                        STRICT_ROOT_PAR(bool pretendSumWMode) )
{
  //Add "weight" to the content of "bin"; sumw2 entries (where present)
  //accumulate weight^2.
  assert(bin<m_nbins);
  getChunk(getGroupIndex(bin))->fill(getChunkBin(bin),weight);
#ifdef LW_STRICT_ROOT_BEHAVIOUR

  if (pretendSumWMode) {
    //Ensure a sumw2 entry exists and accumulate weight^2 into it:
    double * errchunk = getErrChunk(getGroupIndex(bin));
    double& err = errchunk[getChunkBin(bin)];
    if (err!=ERRCHUNK_NOTSET_MAGICVALUE)
      err += weight*weight;
    else
      err = ((std::abs(getBinContent(bin)-static_cast<T>(weight))))+weight*weight;;
    //NB Root bug in sumw2 method. (Needs an fabs around GetBinContent(bin)).
  }
  return;
#else
  if (m_sumw2allocated) {
    //Only update an already-allocated entry; never allocate here:
    double * errchunk = const_cast<double*>(getErrChunkNoAlloc(getGroupIndex(bin)));
    if (errchunk) {
      double& err = errchunk[getChunkBin(bin)];
      if (err!=ERRCHUNK_NOTSET_MAGICVALUE)
        err += weight*weight;
    }
  }
#endif
}
219 
//____________________________________________________________________
template <class T>
inline void FlexErrArrayGroup<T>::copyContents(T*__restrict__ cont, double*__restrict__ err) const
{
  //Copy the group's bin contents into "cont" and (if err is non-null)
  //the associated sumw2 values into "err".
  //Assumes that input arrays are nulled out already.
  //content:
  if (m_chunksallocated) {
    unsigned chunkIndex(0);
    //Iterate only over the range of chunks that can have data
    //(posLSB/posMSB give 1-based first/last set-bit positions):
    const unsigned chunkupper(LWHistBitUtils::posMSB(m_chunksallocated));
    for (unsigned ichunk = LWHistBitUtils::posLSB(m_chunksallocated)-1; ichunk<chunkupper;++ichunk) {
      if (LWHistBitUtils::bitIsSet<uint8_t>(m_chunksallocated,ichunk)) {
        FlexBinChunk<T>* chunk = reinterpret_cast<FlexBinChunk<T>*>(m_indices[chunkIndex++]);
        chunk->copyContents(&(cont[ichunk*FLEXBINCHUNK_NBINS]));
      }
    }
    assert(chunkIndex==nIndicesUsedByChunks());
  }
  //errors:
  if (!err)
    return;
  if (!m_sumw2allocated) {
    //No explicit sumw2 anywhere: errors are implied by the contents:
    for (unsigned ibin = 0; ibin < m_nbins; ++ibin)
      err[ibin] = std::abs(cont[ibin]);
  } else {
    unsigned index(nIndicesUsedByChunks());//first sumw2 slot in m_indices
    unsigned binoffset(0);
    for (unsigned ichunk = 0; ichunk<FLEXERRARRAYGROUP_NCHUNKS;++ichunk) {
      unsigned ibin = binoffset;
      unsigned binupper = ibin + FLEXBINCHUNK_NBINS;
      if (binupper>m_nbins)
        binupper = m_nbins;//last chunk may be partial
      if (LWHistBitUtils::bitIsSet<uint8_t>(m_sumw2allocated,ichunk)) {
        double * errchunk = reinterpret_cast<double*>(m_indices[index++]);
        for (;ibin<binupper;++ibin) {
          assert(ibin>=binoffset);
          //Magic value means "never set explicitly" => fall back to content:
          double& e = errchunk[ibin-binoffset];
          err[ibin] = (e==ERRCHUNK_NOTSET_MAGICVALUE?std::abs(cont[ibin]):e);
        }
      } else {
        for (;ibin<binupper;++ibin)
          err[ibin] = std::abs(cont[ibin]);
      }
      if (binupper==m_nbins)
        break;
      binoffset = binupper;
    }
  }
}
268 
269 //____________________________________________________________________
270 template <class T>
271 inline double FlexErrArrayGroup<T>::Integral() const
272 {
273  double result(0);
274  if (m_chunksallocated) {
275  unsigned chunkIndex(0);
276  const unsigned chunkupper(LWHistBitUtils::posMSB(m_chunksallocated));
277  for (unsigned ichunk = LWHistBitUtils::posLSB(m_chunksallocated)-1; ichunk<chunkupper;++ichunk) {
278  if (LWHistBitUtils::bitIsSet<uint8_t>(m_chunksallocated,ichunk)) {
279  FlexBinChunk<T>* chunk = reinterpret_cast<FlexBinChunk<T>*>(m_indices[chunkIndex++]);
280  result += chunk->Integral();
281  }
282  }
283  assert(chunkIndex==nIndicesUsedByChunks());
284  }
285  return result;
286 }
287 
288 //____________________________________________________________________
289 template <class T>
290 inline T FlexErrArrayGroup<T>::getBinContent(unsigned bin) const
291 {
292  assert(bin<m_nbins);
293  const FlexBinChunk<T>* chunk = getChunkNoAlloc(getGroupIndex(bin));
294  return chunk ? chunk->getBinContent(getChunkBin(bin)) : 0;
295 }
296 
297 //____________________________________________________________________
298 template <class T>
299 inline double FlexErrArrayGroup<T>::getBinError(unsigned bin) const
300 {
301  assert(bin<m_nbins);
302  const double * errchunk = getErrChunkNoAlloc(getGroupIndex(bin));
303  if (errchunk) {
304  const double& err = errchunk[getChunkBin(bin)];
305  return sqrt(fabs(err==ERRCHUNK_NOTSET_MAGICVALUE?getBinContent(bin):err));
306  } else {
307  return sqrt(std::abs(getBinContent(bin)));
308  }
309 }
310 
311 //____________________________________________________________________
312 template <class T>
313 inline void FlexErrArrayGroup<T>::getBinContentAndError(unsigned bin, T& content, double& error ) const
314 {
315  assert(bin<m_nbins);
316  const unsigned igroup(getGroupIndex(bin));
317  const FlexBinChunk<T>* chunk = getChunkNoAlloc(igroup);
318  content = (chunk ? chunk->getBinContent(getChunkBin(bin)) : 0);
319  if (LWHistBitUtils::bitIsSet<uint8_t>(m_sumw2allocated,igroup)) {
320  double * errchunk = reinterpret_cast<double*>
321  (m_indices[nIndicesUsedByChunks()+LWHistBitUtils::countSetBitsBefore<uint8_t>(m_sumw2allocated,igroup)]);
322  assert(errchunk);
323  double& err = errchunk[getChunkBin(bin)];
324  error = sqrt(fabs(err==ERRCHUNK_NOTSET_MAGICVALUE?content:err));
325  } else {
326  error = (content ? sqrt(std::abs(content)) : 0);
327  }
328 }
329 
//____________________________________________________________________
template <class T>
inline void FlexErrArrayGroup<T>::setBinContent( unsigned bin,
                                                 const T& val
                                                 STRICT_ROOT_PAR(bool pretendSumWMode) )
{
  //Set the content of "bin" to "val". For val==0 allocation of a chunk
  //is avoided if none exists yet.

#ifdef LW_STRICT_ROOT_BEHAVIOUR
  //If pretendSumWMode, this call must not change the error!
  double pre_error = pretendSumWMode ? getBinError(bin) : -1;
#endif
  assert(bin<m_nbins);
  const unsigned igroup(getGroupIndex(bin));
  if (val==0) {
    //Zero content: only touch an already-existing chunk:
    FlexBinChunk<T>* chunk = getChunkNoAlloc(igroup);
    if (chunk) {
      chunk->setBinContent(getChunkBin(bin),val);
      assert(chunk->getBinContent(getChunkBin(bin))==val);
    }
#ifdef LW_STRICT_ROOT_BEHAVIOUR
    if (pretendSumWMode)
      getErrChunk(igroup)[getChunkBin(bin)] = pre_error*pre_error;
    assert(!pretendSumWMode||fabs(pre_error-getBinError(bin))<1.0e-10);
#endif
    return;
  }
  FlexBinChunk<T>* chunk = getChunk(igroup);
  chunk->setBinContent(getChunkBin(bin),val);

  assert(chunk->getBinContent(getChunkBin(bin))==val||bothNaN(chunk->getBinContent(getChunkBin(bin)),val));
#ifdef LW_STRICT_ROOT_BEHAVIOUR
  //Restore the error that was observed before the content changed:
  if (pretendSumWMode)
    getErrChunk(igroup)[getChunkBin(bin)] = pre_error*pre_error;
  assert(!pretendSumWMode||fabs(pre_error-getBinError(bin))<1.0e-10);
#endif
}
366 
367 //____________________________________________________________________
368 template <class T>
369 inline void FlexErrArrayGroup<T>::setBinError(unsigned bin, const double& error)
370 {
371  assert(bin<m_nbins);
372  const unsigned igroup(getGroupIndex(bin));
373  double * errchunk(0);
374  const unsigned nbefore = nIndicesUsedByChunks()+LWHistBitUtils::countSetBitsBefore<uint8_t>(m_sumw2allocated,igroup);
375  if (LWHistBitUtils::bitIsSet<uint8_t>(m_sumw2allocated,igroup)) {
376  errchunk = reinterpret_cast<double*>(m_indices[nbefore]);
377  } else {
378  if (error==0&&getBinContent(bin)==0)
379  return;//If bin content is also zero, we avoid allocating anything!
380  errchunk = LWPools::acquire<double,FLEXBINCHUNK_NBINS>();
381  //Provide proper start values (= magic values indicating not used):
382  for (unsigned i=0;i<FLEXBINCHUNK_NBINS;++i)
383  errchunk[i] = ERRCHUNK_NOTSET_MAGICVALUE;
384  addIndexPointer(nbefore,errchunk);
385  LWHistBitUtils::setBit<uint8_t>(m_sumw2allocated,igroup);
386  }
387  errchunk[getChunkBin(bin)] = error*error;
388  assert(fabs(getBinError(bin)-fabs(error))<1.0e-10||bothNaN(getBinError(bin),error));
389 }
390 
391 
//____________________________________________________________________
template <class T>
inline void FlexErrArrayGroup<T>::setBinContentAndError( unsigned bin,
                                                         const T& content,
                                                         const double& error )
{
  //Set both content and error of "bin", skipping allocations whenever
  //the requested values coincide with what the unallocated state
  //already implies.
  assert(bin<m_nbins);
  const unsigned igroup(getGroupIndex(bin));
  bool seterror(true);
  if (content==0) {
    //Zero content: only touch a chunk that already exists:
    FlexBinChunk<T>* chunk = getChunkNoAlloc(igroup);
    if (chunk)
      chunk->setBinContent(getChunkBin(bin),0);
    if (error==0&&(!m_sumw2allocated||!LWHistBitUtils::bitIsSet<uint8_t>(m_sumw2allocated,igroup)))
      seterror = false;
  } else {
    //Set bin content in any case:
    getChunk(igroup)->setBinContent(getChunkBin(bin),content);
    //If error is not already allocated *and* it is compatible with
    //the content, then we can avoid setting it:
    if (!m_sumw2allocated||!LWHistBitUtils::bitIsSet<uint8_t>(m_sumw2allocated,igroup)) {
      double implied_error(sqrt(std::abs(content)));
      if (fabs(error-implied_error)/(1+std::max<double>(error,implied_error))<1.0e-10)
        seterror = false;
    }
  }
  if (seterror) {
    //Get error chunk and set the relevant entry to "error"
    setBinError(bin,error);//Fixme: Put only necessary code here rather than calling this:

  }

  assert(getBinContent(bin)==content);
  assert(fabs(getBinError(bin)-fabs(error))<1.0e-10);
}
427 
//____________________________________________________________________
template <class T>
inline void FlexErrArrayGroup<T>::resetActiveBinLoop()
{
  //Rewind the getNextActiveBin() iteration state to the first bin.
  m_fastloop_nextbin2check=0;
}
434 
//____________________________________________________________________
template <class T>
inline bool FlexErrArrayGroup<T>::getNextActiveBin(unsigned& bin, T& content, double& error)
{
  //Advance the internal iterator to the next bin with non-zero content
  //or error, outputting its index/content/error. Returns false when no
  //further active bin exists. Use resetActiveBinLoop() to restart.
  if (m_fastloop_nextbin2check>=m_nbins)
    return false;
  bin = m_fastloop_nextbin2check;//do looping using better aligned variable.
  unsigned igroup(getGroupIndex(bin));

  //Advance the bin until we find one which has either a binchunk or an error-chunk:
  const FlexBinChunk<T>* chunk(0);
  const double* errchunk(0);
  while (bin<m_nbins) {
    //Fixme: can do it faster than this, since variables inside stay unchanged!!
    chunk = getChunkNoAlloc(igroup);
    errchunk = getErrChunkNoAlloc(igroup);
    if (!chunk&&!errchunk) {
      //Neither contents nor errors allocated: the whole chunk is inactive.
      //Should only happen when checking the first bin in a chunk:
      assert(bin%FLEXBINCHUNK_NBINS==0);
      bin += FLEXBINCHUNK_NBINS;
      ++igroup;
      continue;
    }
    //Ok, have either chunk or errchunk. See if we can find an entry
    //in it, otherwise we progress to the next chunk:
    unsigned chunkbin = bin-igroup*FLEXBINCHUNK_NBINS;
    assert(chunkbin<FLEXBINCHUNK_NBINS);
    for(;chunkbin<FLEXBINCHUNK_NBINS;++chunkbin) {
      content = chunk ? chunk->getBinContent(chunkbin) : 0;
      if (errchunk) {
        //Magic value => sumw2 never set, error implied by content:
        const double& err = errchunk[chunkbin];
        error = ( err == ERRCHUNK_NOTSET_MAGICVALUE ? content : err );
      } else {
        error = content;
      }
      error = ( error ? sqrt(fabs(error)) : 0 );
      if (content!=0||error!=0) {
        //Found an active bin; remember where to resume next call:
        bin = igroup*FLEXBINCHUNK_NBINS+chunkbin;
        m_fastloop_nextbin2check = bin+1;
        return true;
      }
    }
    ++igroup;
    bin = FLEXBINCHUNK_NBINS*igroup;
  }
  return false;
}