2 Copyright (C) 2002-2021 CERN for the benefit of the ATLAS collaboration
6 //In principle any negative number should be fine as "magic"
7 //value. However, just to be sure, we pick something a bit odd:
8 #define ERRCHUNK_NOTSET_MAGICVALUE (-12345.6789)
10 //____________________________________________________________________
// Constructor: sets up an empty group covering 'nbins' bins.  All bin-chunk
// and error-chunk storage is allocated lazily by the accessors below.
// NOTE(review): this excerpt has elided lines (template header, remaining
// member initialisers, braces) - the code below is intentionally untouched.
12 inline FlexErrArrayGroup<T>::FlexErrArrayGroup(unsigned nbins)
17 m_fastloop_nextbin2check(0)
// Group capacity is bounded by the fixed number of chunks it can index:
19 assert(nbins<=FLEXERRARRAYGROUP_MAXBINS);
// Debug-build self-test of the bit-manipulation helpers the whole class
// relies on (setBit / bitIsSet / countSetBitsBefore on uint8_t status words):
22 //Stress-test our bit handling:
23 for (int i=0;i<FLEXERRARRAYGROUP_NCHUNKS;++i) {
24 assert(LWHistBitUtils::bitIsSet<uint8_t>(0xFF,i));
25 assert(!LWHistBitUtils::bitIsSet<uint8_t>(0x0,i));
26 uint8_t data1(0), data2(0x01);
27 LWHistBitUtils::setBit<uint8_t>(data1,i); LWHistBitUtils::setBit<uint8_t>(data2,i);
28 assert(LWHistBitUtils::bitIsSet<uint8_t>(data1,i));
29 assert(LWHistBitUtils::bitIsSet<uint8_t>(data2,i));
30 assert(LWHistBitUtils::bitIsSet<uint8_t>(data2,0));
// NOTE(review): with a loop bound of 4 the '(i<4)' condition below is always
// true, making these asserts trivial; the intended bound was presumably 8
// (all bits of the uint8_t) - confirm against the full source.
32 for (int i=0;i<4;++i) {
33 assert((i<4)==LWHistBitUtils::bitIsSet<uint8_t>(0x0F,i));
34 assert((i<4)!=LWHistBitUtils::bitIsSet<uint8_t>(0xF0,i));
// Verify countSetBitsBefore() against the two nibble patterns:
36 for (int i=0;i<FLEXERRARRAYGROUP_NCHUNKS;++i) {
37 assert(LWHistBitUtils::countSetBitsBefore<uint8_t>(0xFF,i)==i);
38 assert(LWHistBitUtils::countSetBitsBefore<uint8_t>(0x0F,i)==(i<4?i:4));
39 assert(LWHistBitUtils::countSetBitsBefore<uint8_t>(0xF0,i)==(i<4?0:i-4));
44 //____________________________________________________________________
// Destructor: returns all lazily-acquired storage to the memory pools.
46 inline FlexErrArrayGroup<T>::~FlexErrArrayGroup()
// First the bin-content chunks (created with MP_NEW in getChunk()):
50 const unsigned n(nIndicesUsedByChunks());
51 for(unsigned i=0;i<n;++i)
// 'j' is presumably a running index into m_indices declared on an elided line.
52 MP_DELETE((reinterpret_cast<FlexBinChunk<T>*>(m_indices[j++])));
// Then any sumw2 error chunks (acquired from LWPools in getErrChunk()):
55 if (m_sumw2allocated!=0) {
57 const unsigned n(nIndicesUsedByErrors());
58 for(unsigned i=0;i<n;++i)
59 LWPools::release<double,FLEXBINCHUNK_NBINS>(reinterpret_cast<double*>(m_indices[j++]));
// Finally the index array itself ('nindices' declared on an elided line):
63 LWPools::release(m_indices,nindices);
66 //____________________________________________________________________
// Inserts 'newval' at slot 'position' in the packed m_indices array.  The
// array always holds exactly nIndicesUsedByChunks()+nIndicesUsedByErrors()
// entries, so every insertion acquires a one-larger array, copies the old
// pointers around the new slot, and releases the old array.
68 inline void FlexErrArrayGroup<T>::addIndexPointer(unsigned position, void* newval)
// (An elided branch above presumably handles the very first allocation:)
71 m_indices = LWPools::acquire<void*,1>();
72 m_indices[0] = newval;
// General case: grow from nOld to nOld+1 slots.
75 const unsigned nOld(nIndicesUsedByChunks()+nIndicesUsedByErrors());
76 void ** newindices = LWPools::acquire<void*>(nOld+1);
// 'j' is the write cursor into newindices (declared on an elided line):
78 for (unsigned i = 0; i<position;++i)
79 newindices[j++] = m_indices[i];
80 newindices[j++] = newval;
81 for (unsigned i = position; i<nOld;++i)
82 newindices[j++] = m_indices[i];
83 LWPools::release(m_indices,nOld);
84 m_indices = newindices;
87 //____________________________________________________________________
// Const accessor: returns the bin chunk for 'igroup', or null (on an elided
// line) when the chunk was never allocated.  The chunk's position inside the
// packed m_indices array is the number of allocated chunks preceding igroup.
89 inline const FlexBinChunk<T>* FlexErrArrayGroup<T>::getChunkNoAlloc(unsigned igroup) const
91 //returns null if not allocated:
92 if (!LWHistBitUtils::bitIsSet<uint8_t>(m_chunksallocated,igroup))
95 return reinterpret_cast<FlexBinChunk<T>*>(m_indices[LWHistBitUtils::countSetBitsBefore<uint8_t>(m_chunksallocated,igroup)]);
98 //____________________________________________________________________
// Non-const twin of the accessor above: same lookup, mutable chunk returned.
100 inline FlexBinChunk<T>* FlexErrArrayGroup<T>::getChunkNoAlloc(unsigned igroup)
102 //returns null if not allocated:
103 if (!LWHistBitUtils::bitIsSet<uint8_t>(m_chunksallocated,igroup))
106 return reinterpret_cast<FlexBinChunk<T>*>(m_indices[LWHistBitUtils::countSetBitsBefore<uint8_t>(m_chunksallocated,igroup)]);
109 //____________________________________________________________________
// Allocating accessor: returns the chunk for 'igroup', creating it on first
// use.  Creation inserts the new pointer at its sorted slot in m_indices and
// then marks the corresponding bit in m_chunksallocated.
111 inline FlexBinChunk<T>* FlexErrArrayGroup<T>::getChunk(unsigned igroup)
113 if (!LWHistBitUtils::bitIsSet<uint8_t>(m_chunksallocated,igroup)) {
114 //Grow index array and update status bit:
115 FlexBinChunk<T> * chunk = MP_NEW(FlexBinChunk<T>)();
// Insert BEFORE setting the bit, so countSetBitsBefore() still yields the
// pre-insertion slot:
116 addIndexPointer(LWHistBitUtils::countSetBitsBefore<uint8_t>(m_chunksallocated,igroup),chunk);
117 LWHistBitUtils::setBit<uint8_t>(m_chunksallocated,igroup);
118 // cppcheck-suppress assertWithSideEffect
119 assert(getChunk(igroup)==chunk);
120 // cppcheck-suppress assertWithSideEffect
121 assert(getChunkNoAlloc(igroup)==chunk);
124 return reinterpret_cast<FlexBinChunk<T>*>(m_indices[LWHistBitUtils::countSetBitsBefore<uint8_t>(m_chunksallocated,igroup)]);
128 //____________________________________________________________________
// Allocating accessor for the sumw2 (error) chunk of 'igroup'.  Error chunks
// live in m_indices AFTER all bin chunks, hence the nIndicesUsedByChunks()
// offset.  A freshly acquired chunk is filled with the "not set" magic value
// so unset bins can later fall back to |content| as their error.
130 inline double* FlexErrArrayGroup<T>::getErrChunk(unsigned igroup)
132 const unsigned nbefore = nIndicesUsedByChunks()+LWHistBitUtils::countSetBitsBefore<uint8_t>(m_sumw2allocated,igroup);
133 if (m_sumw2allocated&&LWHistBitUtils::bitIsSet<uint8_t>(m_sumw2allocated,igroup))
134 return reinterpret_cast<double*>(m_indices[nbefore]);
136 double * errchunk = LWPools::acquire<double,FLEXBINCHUNK_NBINS>();
137 //Provide proper start values (= magic values indicating not used):
138 for (unsigned i=0;i<FLEXBINCHUNK_NBINS;++i)
139 errchunk[i] = ERRCHUNK_NOTSET_MAGICVALUE;
140 addIndexPointer(nbefore,errchunk);
141 LWHistBitUtils::setBit<uint8_t>(m_sumw2allocated,igroup);
145 //____________________________________________________________________
// Non-allocating lookup of the error chunk for 'igroup'; an elided line
// presumably returns null when the chunk does not exist.
147 inline const double* FlexErrArrayGroup<T>::getErrChunkNoAlloc(unsigned igroup) const
149 if (m_sumw2allocated&&LWHistBitUtils::bitIsSet<uint8_t>(m_sumw2allocated,igroup))
150 return reinterpret_cast<double*>
151 (m_indices[nIndicesUsedByChunks()+LWHistBitUtils::countSetBitsBefore<uint8_t>(m_sumw2allocated,igroup)]);
156 //____________________________________________________________________
// Unweighted fill of 'bin': delegates the content increment to the owning
// chunk, then keeps any existing sumw2 information consistent.
158 inline void FlexErrArrayGroup<T>::fill( unsigned bin
159 STRICT_ROOT_PAR(bool pretendSumWMode) )
162 getChunk(getGroupIndex(bin))->fill(getChunkBin(bin));
164 #ifdef LW_STRICT_ROOT_BEHAVIOUR
// In strict-ROOT mode, filling while "sumw2 is on" must update the error
// even if the chunk has to be allocated for that purpose:
165 if (pretendSumWMode) {
166 double * errchunk = getErrChunk(getGroupIndex(bin));
167 double& err = errchunk[getChunkBin(bin)];
168 if (err!=ERRCHUNK_NOTSET_MAGICVALUE)
// Bin had no explicit error yet (elided increment branch above): seed it
// from the current content.
171 err = std::abs(getBinContent(bin));
// Normal mode: only touch errors that were already explicitly allocated.
175 if (m_sumw2allocated) {
176 const unsigned igroup(getGroupIndex(bin));
177 if (LWHistBitUtils::bitIsSet<uint8_t>(m_sumw2allocated,igroup)) {
178 double * errchunk = getErrChunk(igroup);
180 double& err = errchunk[getChunkBin(bin)];
181 if (err!=ERRCHUNK_NOTSET_MAGICVALUE)
188 //____________________________________________________________________
// Weighted fill of 'bin': adds 'weight' to the content and, where sumw2
// information exists (or strict-ROOT mode demands it), adds weight^2 to the
// stored squared error.
190 inline void FlexErrArrayGroup<T>::fill( unsigned bin,
192 STRICT_ROOT_PAR(bool pretendSumWMode) )
195 getChunk(getGroupIndex(bin))->fill(getChunkBin(bin),weight);
196 #ifdef LW_STRICT_ROOT_BEHAVIOUR
198 if (pretendSumWMode) {
199 double * errchunk = getErrChunk(getGroupIndex(bin));
200 double& err = errchunk[getChunkBin(bin)];
201 if (err!=ERRCHUNK_NOTSET_MAGICVALUE)
202 err += weight*weight;
// First explicit error for this bin: reconstruct the pre-fill sumw2 from the
// post-fill content, then add weight^2.  NOTE(review): stray double ';;'.
204 err = ((std::abs(getBinContent(bin)-static_cast<T>(weight))))+weight*weight;;
205 //NB Root bug in sumw2 method. (Needs an fabs around GetBinContent(bin)).
// Normal mode: update only an already-allocated error chunk (no allocation
// here, hence the NoAlloc accessor + const_cast for in-place update).
209 if (m_sumw2allocated) {
210 double * errchunk = const_cast<double*>(getErrChunkNoAlloc(getGroupIndex(bin)));
212 double& err = errchunk[getChunkBin(bin)];
213 if (err!=ERRCHUNK_NOTSET_MAGICVALUE)
214 err += weight*weight;
220 //____________________________________________________________________
// Bulk export: writes every bin's content into cont[] and its squared-error
// into err[].  Both output arrays must already be zeroed by the caller.
// Bins without an explicit sumw2 entry get |content| as their squared error
// (the Poisson-style default).
222 inline void FlexErrArrayGroup<T>::copyContents(T*__restrict__ cont, double*__restrict__ err) const
224 //Assumes that input arrays are nulled out already.
226 if (m_chunksallocated) {
227 unsigned chunkIndex(0);
// Only iterate the [LSB..MSB] window of allocated chunk bits:
228 const unsigned chunkupper(LWHistBitUtils::posMSB(m_chunksallocated));
229 for (unsigned ichunk = LWHistBitUtils::posLSB(m_chunksallocated)-1; ichunk<chunkupper;++ichunk) {
230 if (LWHistBitUtils::bitIsSet<uint8_t>(m_chunksallocated,ichunk)) {
231 FlexBinChunk<T>* chunk = reinterpret_cast<FlexBinChunk<T>*>(m_indices[chunkIndex++]);
232 chunk->copyContents(&(cont[ichunk*FLEXBINCHUNK_NBINS]));
235 assert(chunkIndex==nIndicesUsedByChunks());
// No sumw2 anywhere: every error defaults to |content|.
240 if (!m_sumw2allocated) {
241 for (unsigned ibin = 0; ibin < m_nbins; ++ibin)
242 err[ibin] = std::abs(cont[ibin]);
// Mixed case: walk all chunks, consuming error chunks from m_indices (they
// are stored after the bin chunks, hence the starting offset).
244 unsigned index(nIndicesUsedByChunks());
245 unsigned binoffset(0);
246 for (unsigned ichunk = 0; ichunk<FLEXERRARRAYGROUP_NCHUNKS;++ichunk) {
247 unsigned ibin = binoffset;
248 unsigned binupper = ibin + FLEXBINCHUNK_NBINS;
// Clamp the last (possibly partial) chunk to m_nbins (elided line):
249 if (binupper>m_nbins)
251 if (LWHistBitUtils::bitIsSet<uint8_t>(m_sumw2allocated,ichunk)) {
252 double * errchunk = reinterpret_cast<double*>(m_indices[index++]);
253 for (;ibin<binupper;++ibin) {
254 assert(ibin>=binoffset);
255 double& e = errchunk[ibin-binoffset];
// Magic value means "never set": fall back to |content|.
256 err[ibin] = (e==ERRCHUNK_NOTSET_MAGICVALUE?std::abs(cont[ibin]):e);
259 for (;ibin<binupper;++ibin)
260 err[ibin] = std::abs(cont[ibin]);
262 if (binupper==m_nbins)
264 binoffset = binupper;
269 //____________________________________________________________________
// Sums the integrals of all allocated chunks; unallocated chunks contribute
// nothing.  'result' is declared (and presumably returned) on elided lines.
271 inline double FlexErrArrayGroup<T>::Integral() const
274 if (m_chunksallocated) {
275 unsigned chunkIndex(0);
// Same [LSB..MSB] bit-window iteration as copyContents():
276 const unsigned chunkupper(LWHistBitUtils::posMSB(m_chunksallocated));
277 for (unsigned ichunk = LWHistBitUtils::posLSB(m_chunksallocated)-1; ichunk<chunkupper;++ichunk) {
278 if (LWHistBitUtils::bitIsSet<uint8_t>(m_chunksallocated,ichunk)) {
279 FlexBinChunk<T>* chunk = reinterpret_cast<FlexBinChunk<T>*>(m_indices[chunkIndex++]);
280 result += chunk->Integral();
283 assert(chunkIndex==nIndicesUsedByChunks());
288 //____________________________________________________________________
// Content of 'bin'; an unallocated chunk means the bin was never filled -> 0.
290 inline T FlexErrArrayGroup<T>::getBinContent(unsigned bin) const
293 const FlexBinChunk<T>* chunk = getChunkNoAlloc(getGroupIndex(bin));
294 return chunk ? chunk->getBinContent(getChunkBin(bin)) : 0;
297 //____________________________________________________________________
// Error of 'bin': sqrt of the stored squared error, or sqrt(|content|) when
// no explicit error was ever set (magic value / no error chunk at all).
299 inline double FlexErrArrayGroup<T>::getBinError(unsigned bin) const
302 const double * errchunk = getErrChunkNoAlloc(getGroupIndex(bin));
304 const double& err = errchunk[getChunkBin(bin)];
305 return sqrt(fabs(err==ERRCHUNK_NOTSET_MAGICVALUE?getBinContent(bin):err));
// Fallback (no error chunk allocated for this group):
307 return sqrt(std::abs(getBinContent(bin)));
311 //____________________________________________________________________
// Combined lookup of content and error for 'bin' - avoids resolving the
// group index and chunk twice compared to calling the two getters.
313 inline void FlexErrArrayGroup<T>::getBinContentAndError(unsigned bin, T& content, double& error ) const
316 const unsigned igroup(getGroupIndex(bin));
317 const FlexBinChunk<T>* chunk = getChunkNoAlloc(igroup);
318 content = (chunk ? chunk->getBinContent(getChunkBin(bin)) : 0);
319 if (LWHistBitUtils::bitIsSet<uint8_t>(m_sumw2allocated,igroup)) {
// Inline version of getErrChunkNoAlloc()'s index arithmetic:
320 double * errchunk = reinterpret_cast<double*>
321 (m_indices[nIndicesUsedByChunks()+LWHistBitUtils::countSetBitsBefore<uint8_t>(m_sumw2allocated,igroup)]);
323 double& err = errchunk[getChunkBin(bin)];
// Magic value -> error derived from content, same rule as getBinError():
324 error = sqrt(fabs(err==ERRCHUNK_NOTSET_MAGICVALUE?content:err));
326 error = (content ? sqrt(std::abs(content)) : 0);
330 //____________________________________________________________________
// Sets the content of 'bin' to 'val'.  In strict-ROOT mode with sumw2 "on",
// setting the content must NOT change the bin error, so the pre-call error
// is captured and written back afterwards.
332 inline void FlexErrArrayGroup<T>::setBinContent( unsigned bin,
334 STRICT_ROOT_PAR(bool pretendSumWMode) )
337 #ifdef LW_STRICT_ROOT_BEHAVIOUR
338 //If pretendSumWMode, this call must not change the error!
339 double pre_error = pretendSumWMode ? getBinError(bin) : -1;
342 const unsigned igroup(getGroupIndex(bin));
// Fast path (guarded by elided lines): chunk already exists.
344 FlexBinChunk<T>* chunk = getChunkNoAlloc(igroup);
346 chunk->setBinContent(getChunkBin(bin),val);
347 assert(chunk->getBinContent(getChunkBin(bin))==val);
349 #ifdef LW_STRICT_ROOT_BEHAVIOUR
// Restore the captured error (stored as its square):
351 getErrChunk(igroup)[getChunkBin(bin)] = pre_error*pre_error;
352 assert(!pretendSumWMode||fabs(pre_error-getBinError(bin))<1.0e-10);
// Slow path: allocate the chunk first, then set the content.
356 FlexBinChunk<T>* chunk = getChunk(igroup);
357 chunk->setBinContent(getChunkBin(bin),val);
// bothNaN(): NaN!=NaN, so a plain equality assert would misfire on NaN input.
359 assert(chunk->getBinContent(getChunkBin(bin))==val||bothNaN(chunk->getBinContent(getChunkBin(bin)),val));
360 #ifdef LW_STRICT_ROOT_BEHAVIOUR
362 getErrChunk(igroup)[getChunkBin(bin)] = pre_error*pre_error;
363 assert(!pretendSumWMode||fabs(pre_error-getBinError(bin))<1.0e-10);
367 //____________________________________________________________________
// Sets the error of 'bin' (stored internally as error^2), lazily allocating
// the group's error chunk.  Skips allocation entirely when both the new
// error and the bin content are zero, since that is the implied default.
369 inline void FlexErrArrayGroup<T>::setBinError(unsigned bin, const double& error)
372 const unsigned igroup(getGroupIndex(bin));
373 double * errchunk(0);
374 const unsigned nbefore = nIndicesUsedByChunks()+LWHistBitUtils::countSetBitsBefore<uint8_t>(m_sumw2allocated,igroup);
375 if (LWHistBitUtils::bitIsSet<uint8_t>(m_sumw2allocated,igroup)) {
376 errchunk = reinterpret_cast<double*>(m_indices[nbefore]);
378 if (error==0&&getBinContent(bin)==0)
379 return;//If bin content is also zero, we avoid allocating anything!
// First error in this group: acquire and initialise a fresh chunk
// (same lazy-allocation sequence as getErrChunk()):
380 errchunk = LWPools::acquire<double,FLEXBINCHUNK_NBINS>();
381 //Provide proper start values (= magic values indicating not used):
382 for (unsigned i=0;i<FLEXBINCHUNK_NBINS;++i)
383 errchunk[i] = ERRCHUNK_NOTSET_MAGICVALUE;
384 addIndexPointer(nbefore,errchunk);
385 LWHistBitUtils::setBit<uint8_t>(m_sumw2allocated,igroup);
387 errchunk[getChunkBin(bin)] = error*error;
388 assert(fabs(getBinError(bin)-fabs(error))<1.0e-10||bothNaN(getBinError(bin),error));
392 //____________________________________________________________________
// Sets content and error of 'bin' together, trying hard to avoid allocating
// an error chunk when the requested error equals the content-implied default
// sqrt(|content|).
394 inline void FlexErrArrayGroup<T>::setBinContentAndError( unsigned bin,
396 const double& error )
399 const unsigned igroup(getGroupIndex(bin));
// (Elided guard above presumably handles the content==0 case:) reset any
// existing chunk entry to 0, and bail out early if no error storage exists.
402 FlexBinChunk<T>* chunk = getChunkNoAlloc(igroup);
404 chunk->setBinContent(getChunkBin(bin),0);
405 if (error==0&&(!m_sumw2allocated||!LWHistBitUtils::bitIsSet<uint8_t>(m_sumw2allocated,igroup)))
408 //Set bin content in any case:
409 getChunk(igroup)->setBinContent(getChunkBin(bin),content);
410 //If error is not already allocated *and* it is compatible with
411 //the content, then we can avoid setting it:
412 if (!m_sumw2allocated||!LWHistBitUtils::bitIsSet<uint8_t>(m_sumw2allocated,igroup)) {
413 double implied_error(sqrt(std::abs(content)));
// Relative comparison with +1 in the denominator to stay robust near zero:
414 if (fabs(error-implied_error)/(1+std::max<double>(error,implied_error))<1.0e-10)
419 //Get error chunk and set the relevant entry to "error"
420 setBinError(bin,error);//Fixme: Put only necessary code here rather than calling this:
424 assert(getBinContent(bin)==content);
425 assert(fabs(getBinError(bin)-fabs(error))<1.0e-10);
428 //____________________________________________________________________
// Rewinds the active-bin iterator used by getNextActiveBin() to bin 0.
430 inline void FlexErrArrayGroup<T>::resetActiveBinLoop()
432 m_fastloop_nextbin2check=0;
435 //____________________________________________________________________
// Iterator step: advances m_fastloop_nextbin2check to the next bin with a
// non-zero content or error, outputting bin/content/error.  Returns false
// (on elided lines) when the scan passes m_nbins.
// NOTE(review): this function is truncated at the end of the excerpt.
437 inline bool FlexErrArrayGroup<T>::getNextActiveBin(unsigned& bin, T& content, double& error)
439 if (m_fastloop_nextbin2check>=m_nbins)
441 bin = m_fastloop_nextbin2check;//do looping using better aligned variable.
442 unsigned igroup(getGroupIndex(bin));
444 //Advance the bin until we find one which has either a binchunk or an error-chunk:
445 const FlexBinChunk<T>* chunk(0);
446 const double* errchunk(0);
447 while (bin<m_nbins) {
448 //Fixme: can do it faster than this, since variables inside stay unchanged!!
449 chunk = getChunkNoAlloc(igroup);
450 errchunk = getErrChunkNoAlloc(igroup);
// Entirely empty group: skip a whole chunk's worth of bins at once.
451 if (!chunk&&!errchunk) {
452 //Should only happen when checking the first bin in a chunk:
453 assert(bin%FLEXBINCHUNK_NBINS==0);
454 bin += FLEXBINCHUNK_NBINS;
458 //Ok, have either chunk or errchunk. See if we can find an entry
459 //in it, otherwise we progress to the next chunk:
460 unsigned chunkbin = bin-igroup*FLEXBINCHUNK_NBINS;
461 assert(chunkbin<FLEXBINCHUNK_NBINS);
462 for(;chunkbin<FLEXBINCHUNK_NBINS;++chunkbin) {
463 content = chunk ? chunk->getBinContent(chunkbin) : 0;
465 const double& err = errchunk[chunkbin];
// Magic value -> error implied by content, as elsewhere in this file:
466 error = ( err == ERRCHUNK_NOTSET_MAGICVALUE ? content : err );
470 error = ( error ? sqrt(fabs(error)) : 0 );
471 if (content!=0||error!=0) {
472 bin = igroup*FLEXBINCHUNK_NBINS+chunkbin;
// Remember where to resume on the next call:
473 m_fastloop_nextbin2check = bin+1;
478 bin = FLEXBINCHUNK_NBINS*igroup;