2 Copyright (C) 2002-2023 CERN for the benefit of the ATLAS collaboration
// Bit patterns for the 32-bit m_stages word: 2 stage bits per bin (16 bins;
// cf. the assert FLEXBINCHUNK_NBINS*2==sizeof(uint32_t)*8 in calcOffset).
// Per-bin stage codes: 0x0 = empty, 0x1 = uint8_t, 0x2 = uint16_t,
// 0x3 = full type T (see FlexBinChunk_stageByType). These constants are the
// "every bin in the same stage" patterns, sanity-checked in the constructor.
7 #define FLEXBINCHUNK_ALLEMPTYSTAGE (static_cast<uint32_t>(0x00000000))
8 #define FLEXBINCHUNK_ALLFULLSTAGE (static_cast<uint32_t>(0xFFFFFFFF))
9 #define FLEXBINCHUNK_ALLCHARSTAGE (static_cast<uint32_t>(0x55555555))
10 #define FLEXBINCHUNK_ALLSHORTSTAGE (static_cast<uint32_t>(0xAAAAAAAA))
// Map a bin's storage type to its 2-bit stage code as packed in m_stages:
// 0x1 = uint8_t ("char" stage), 0x2 = uint16_t ("short" stage), and the
// generic fallback 0x3 = the chunk's full type T.
// Fixed: removed stray leading line-number artifacts that made these
// declarations unparseable.
template <class T> inline unsigned FlexBinChunk_stageByType() { return 0x3; }
template <> inline unsigned FlexBinChunk_stageByType<uint8_t>() { return 0x1; }
template <> inline unsigned FlexBinChunk_stageByType<uint16_t>() { return 0x2; }
16 //____________________________________________________________________
// Grow the storage of 'bin' from Told to Tnew, preserving its current
// content and shifting the packed per-bin data inside m_data accordingly.
// Told==Tnew is the convention for "coming from the empty stage" (checked
// against getStage(bin)==0x0 below). Callers (see fill()) use the returned
// unsigned as the new allocation size; the return statements themselves fall
// in lines missing from this excerpt.
// NOTE(review): the embedded original line numbering jumps (21, 25, 27, 32,
// 41-43, 49-50, ...), so the enclosing template<class T> header, braces and
// returns are not visible here; code is kept byte-identical.
18 template <class Told, class Tnew>
19 // Disable ubsan to turn off unaligned access warnings.
20 unsigned FlexBinChunk<T>::changeBinStage NO_SANITIZE_UNDEFINED (unsigned bin, unsigned offset)
22 assert(bin<FLEXBINCHUNK_NBINS);
23 assert(sizeof(Told)<=sizeof(Tnew));
24 assert(offset==calcOffset(bin));
26 T oldval = getBinContent(bin);
28 //NB: sizeof(Told)==sizeof(Tnew) is used to indicate step from an
29 //empty stage (nb: available compile-time!):
30 const bool prevStageEmpty(sizeof(Tnew)==sizeof(Told));
31 assert(prevStageEmpty==(getStage(bin)==0x0));
33 //if !prevStageEmpty, then we know (compile-time) that m_data!=0:
34 if (prevStageEmpty && !m_data) {
35 //First allocation in chunk:
36 m_data = LWPools::acquire(sizeof(Tnew));
37 for (unsigned i = 0; i<sizeof(Tnew);++i)
38 m_data[i] = 0;//fixme: reinterpret cast instead of loop.
39 setStage(bin,FlexBinChunk_stageByType<Tnew>());
40 assert(getBinContent(bin)==oldval);
// General case: reallocate with room for the wider bin value. extrasize is
// sizeof(Tnew) minus the bin's old footprint (sizeof(Told), or 0 when the
// bin was previously empty):
44 //need to swap out current m_data with a new array:
45 const int extrasize = sizeof(Tnew)+(prevStageEmpty?0:-sizeof(Told));
46 const int oldsize = LWHistBitUtils::totalSummedOffsetInStages<sizeof(T)>(m_stages);//NB: we already have some of the value in "offset"
47 const int newsize(oldsize+extrasize);
48 char * newdata = LWPools::acquire(newsize);
// Copy the bytes of the bins preceding 'bin', capture the bin's old raw
// value, then copy the tail while leaving a sizeof(Tnew)-byte hole at
// 'offset' for the widened value:
51 memcpy(newdata,m_data,offset);
52 //New bins (we set them afterwards actually... can't get it to work otherwise...):
55 newval = (*(reinterpret_cast<Told*>(&(m_data[offset]))));
57 if (oldsize>int(offset))
58 memcpy(&(newdata[offset+extrasize]),&(m_data[offset]),oldsize-offset);
60 LWPools::release(m_data,oldsize);
62 setStage(bin,FlexBinChunk_stageByType<Tnew>());
63 getBinValRef<Tnew>(offset) = newval;
64 assert(getBinContent(bin)==oldval);
68 //____________________________________________________________________
// Mutable reference to the value stored at byte 'offset' of m_data,
// reinterpreted as T2. The access may be unaligned, hence the ubsan opt-out.
// NOTE(review): the enclosing template<class T> template<class T2> header
// (original lines 69-70) is not visible in this excerpt.
71 // Disable ubsan to turn off unaligned access warnings.
72 T2& FlexBinChunk<T>::getBinValRef NO_SANITIZE_UNDEFINED (unsigned offset)
75 return *(reinterpret_cast<T2*>(&(m_data[offset])));
78 //____________________________________________________________________
// By-value read of the T2 stored at byte 'offset' of m_data (const
// counterpart of getBinValRef); possibly unaligned, hence the ubsan opt-out.
// NOTE(review): the enclosing template headers are missing from this excerpt.
81 // Disable ubsan to turn off unaligned access warnings.
82 T2 FlexBinChunk<T>::getBinVal NO_SANITIZE_UNDEFINED (unsigned offset) const
85 return *(reinterpret_cast<T2*>(&(m_data[offset])));
88 //____________________________________________________________________
// Byte offset of 'bin' inside the packed m_data array: the summed storage
// footprint of all lower-index bins. The right-shift discards the 2-bit
// stage fields of bin..NBINS-1 so only the preceding bins are summed; bin 0
// trivially sits at offset 0.
90 unsigned FlexBinChunk<T>::calcOffset(unsigned bin ) const
92 assert(bin<FLEXBINCHUNK_NBINS);
93 assert(FLEXBINCHUNK_NBINS*2==sizeof(uint32_t)*8);
94 return bin? LWHistBitUtils::totalSummedOffsetInStages<sizeof(T)>(m_stages >> (2*(FLEXBINCHUNK_NBINS-bin))) : 0;
97 //____________________________________________________________________
// Default-construct an empty chunk: no allocation yet (m_data==0) and every
// bin in the empty stage (m_stages==0). The asserts sanity-check that the
// all-* stage bit patterns yield the expected summed footprints, and the
// debug loop exercises the setStage/getStage round-trip.
99 FlexBinChunk<T>::FlexBinChunk()
100 : m_data(0), m_stages(0)
102 assert(sizeof(T)*FLEXBINCHUNK_NBINS<=UCHAR_MAX+1);
103 assert(LWHistBitUtils::totalSummedOffsetInStages<sizeof(T)>(FLEXBINCHUNK_ALLEMPTYSTAGE)==0);
104 assert(LWHistBitUtils::totalSummedOffsetInStages<sizeof(T)>(FLEXBINCHUNK_ALLFULLSTAGE)==sizeof(T)*FLEXBINCHUNK_NBINS);
105 assert(LWHistBitUtils::totalSummedOffsetInStages<sizeof(T)>(FLEXBINCHUNK_ALLCHARSTAGE)==sizeof(uint8_t)*FLEXBINCHUNK_NBINS);
106 assert(LWHistBitUtils::totalSummedOffsetInStages<sizeof(T)>(FLEXBINCHUNK_ALLSHORTSTAGE)==sizeof(uint16_t)*FLEXBINCHUNK_NBINS);
109 //stress-test setStage/getStage;
111 for (unsigned bin=0;bin<FLEXBINCHUNK_NBINS;++bin)
112 for (unsigned stagetest=0;stagetest<4;++stagetest)
// NOTE(review): the reset of m_stages back to 0 after this stress test is
// presumably in lines missing from this excerpt (original 116+) — confirm
// against the full file.
114 setStage(bin,stagetest);
115 assert(stagetest==getStage(bin));
121 //____________________________________________________________________
// Release the packed data array back to the pool; the allocation size is
// recomputed from the per-bin stage bits in m_stages.
123 FlexBinChunk<T>::~FlexBinChunk()
126 LWPools::release(m_data,LWHistBitUtils::totalSummedOffsetInStages<sizeof(T)>(m_stages));
131 //____________________________________________________________________
// fill(bin): increment the bin content by one, promoting the bin's storage
// stage (empty -> uint8_t -> uint16_t -> T) whenever the current narrow type
// is about to saturate (UCHAR_MAX / USHRT_MAX). The leading #if block adds
// fast paths for chunks that are homogeneously all-char/all-full/all-short.
// NOTE(review): the original line numbering jumps (137, 144-149, 153-154,
// 159-166, 169-171, ...) — braces, returns, switch header and #endif are not
// visible in this excerpt; code kept byte-identical.
133 // Disable ubsan to turn off unaligned access warnings.
134 void FlexBinChunk<T>::fill NO_SANITIZE_UNDEFINED (unsigned bin)
136 assert(bin<FLEXBINCHUNK_NBINS);
138 #if FLEXBINCHUNK_CONVERTALLTHRESHOLD != FLEXBINCHUNK_NBINS
139 //fill(x) gives priority to allcharstage.
140 if (m_stages==FLEXBINCHUNK_ALLCHARSTAGE) {
141 uint8_t* bc = &((reinterpret_cast<uint8_t*>(m_data))[bin]);
142 if (*bc==UCHAR_MAX) {
// uint8_t would overflow on ++: widen this bin to uint16_t first.
143 changeBinStage<uint8_t,uint16_t>(bin,calcOffset(bin));
150 if (m_stages==FLEXBINCHUNK_ALLFULLSTAGE) {
151 // cppcheck-suppress invalidPointerCast
152 ++((reinterpret_cast<T*>(m_data))[bin]);
155 if (m_stages==FLEXBINCHUNK_ALLSHORTSTAGE) {
156 uint16_t* bs = &((reinterpret_cast<uint16_t*>(m_data))[bin]);
157 if (*bs==USHRT_MAX) {
158 changeBinStage<uint16_t,T>(bin,calcOffset(bin));
// Generic path: dispatch on the bin's individual stage.
167 const unsigned offset = calcOffset(bin);
168 const unsigned stage = getStage(bin);
172 ++(getBinValRef<T>(offset));
// Empty stage: <uint8_t,uint8_t> (Told==Tnew) allocates the first uint8_t
// slot for this bin, then increment it:
177 unsigned n = changeBinStage<uint8_t,uint8_t>(bin,offset);
178 ++(getBinValRef<uint8_t>(offset));
179 FLEXBINCHUNK_TESTMOVEALLBINS(n);
184 uint8_t * bc = &(getBinValRef<uint8_t>(offset));
185 if (*bc==UCHAR_MAX) {
186 unsigned n = changeBinStage<uint8_t,uint16_t>(bin,offset);
187 ++(getBinValRef<uint16_t>(offset));
188 FLEXBINCHUNK_TESTMOVEALLBINS(n);
196 uint16_t * bs = &(getBinValRef<uint16_t>(offset));
197 if (*bs==USHRT_MAX) {
198 unsigned n = changeBinStage<uint16_t,T>(bin,offset);
199 ++(getBinValRef<T>(offset));
200 FLEXBINCHUNK_TESTMOVEALLBINS(n);
211 //____________________________________________________________________
// Promote 'bin' from its current (non-full) stage straight to the full
// stage 0x3, forwarding changeBinStage's return value (used by callers as
// the new allocation size).
213 unsigned FlexBinChunk<T>::moveToFullStage(unsigned bin, unsigned currentstage,unsigned offset)
215 assert(currentstage!=0x3);
216 assert(currentstage==0x0||currentstage==0x1||currentstage==0x2);
217 switch(currentstage) {
// <T,T> (Told==Tnew) encodes "coming from the empty stage" — cf.
// changeBinStage:
218 case 0x0: return changeBinStage<T,T>(bin,offset);
219 case 0x1: return changeBinStage<uint8_t,T>(bin,offset);
220 case 0x2: return changeBinStage<uint16_t,T>(bin,offset);
227 //____________________________________________________________________
// fill(bin,weight): add 'weight' to the bin content. For integer T the
// result is clamped to [-2147483647, 2147483647] "as is done in ROOT"; the
// bin's storage stage is promoted as needed to hold the new value. The
// leading #if block fast-paths homogeneous all-full chunks (and, for
// integer T, all-char chunks).
// NOTE(review): switch headers, braces, returns and #else/#endif lines fall
// in the gaps of this excerpt's original line numbering (231, 233, 235-236,
// 244-245, 255, 257-258, 260-262, 264-266, 269, 272-273, 279-281, 284-286,
// 289, 291, 293, 298-300, 302, 308-309, 314, 318-319, 322, 324-325, ...);
// code kept byte-identical.
229 // Disable ubsan to turn off unaligned access warnings.
230 void FlexBinChunk<T>::fill NO_SANITIZE_UNDEFINED (unsigned bin, const double& weight)
232 assert(bin<FLEXBINCHUNK_NBINS);
234 T expectedval(getBinContent(bin)+T(weight));//NB: ignores bounds in case of integers
237 #if FLEXBINCHUNK_CONVERTALLTHRESHOLD != FLEXBINCHUNK_NBINS
238 if (m_stages==FLEXBINCHUNK_ALLFULLSTAGE) {
239 //Special case: all bins in full mode. Deal with this fast.
240 // cppcheck-suppress invalidPointerCast
241 T* data = reinterpret_cast<T*>(m_data);
242 data[bin] += static_cast<T>(weight);
243 assert(std::fabs(getBinContent(bin)-expectedval)<1.0e-5);
246 //In this method we only give priority to "all-char-stage or
247 //all-short-stage" in the case of integers (because fill(x,w) usually moves directly to full)
248 if (std::numeric_limits<T>::is_integer) {
249 if (m_stages==FLEXBINCHUNK_ALLCHARSTAGE) {
250 uint8_t* bc = &((reinterpret_cast<uint8_t*>(m_data))[bin]);
251 T newval(*bc+T(weight));//fixme: ignores overflow
// If the new value no longer fits in uint8_t, widen just this bin — to
// uint16_t if it fits there, otherwise straight to the full type T:
252 if (newval>UCHAR_MAX||newval<0) {
253 if (newval>=0&&newval<USHRT_MAX)
254 changeBinStage<uint8_t,uint16_t>(bin,calcOffset(bin));
256 changeBinStage<uint8_t,T>(bin,calcOffset(bin));
259 *bc = static_cast<uint8_t>(newval);
263 //NB: We ignore the m_stages==FLEXBINCHUNK_ALLSHORTSTAGE for now (fixme? worth it?)
// Generic path: per-bin stage handling.
267 const unsigned offset = calcOffset(bin);
268 unsigned stage = getStage(bin);
270 if (std::numeric_limits<T>::is_integer) {
271 //Special rounding and bounds checking - as is done in ROOT:
274 getBinValRef<T>(offset)=std::min<T>(2147483647,std::max<T>(-2147483647,(getBinVal<T>(offset))+T(weight)));
275 //NB: How do we know that we don't overflow here in the
276 // old+T(weight)??? (ROOT bug?) make validation of ROOT
277 // compat in that scenario.
278 assert(getBinContent(bin)==expectedval);
282 //Figure out the final value we are aiming for:
283 T newval = T(weight);//could also overflow here...
287 case 0x1: newval += (getBinVal<uint8_t>(offset)); break;
288 case 0x2: newval += (getBinVal<uint16_t>(offset)); break;
290 default: assert(false); break;
292 assert(newval==expectedval);
294 //Do we need to move to full stage?:
295 unsigned newarrsize(0);
296 if (stage!=0x3&&(newval<0||newval>USHRT_MAX)) {
297 newarrsize = moveToFullStage(bin,stage,offset);
301 //Standard ROOT treatment in case of full stage:
303 if ( newval > -2147483647 && newval < 2147483647 ) {
304 (getBinValRef<T>(offset)) = newval;
305 assert(getBinContent(bin)==newval);
306 assert(getBinContent(bin)==expectedval);
307 FLEXBINCHUNK_TESTMOVEALLBINS(newarrsize);
// Out-of-range: clamp to the ROOT-style +-2147483647 limits:
310 if (newval < -2147483647)
311 (getBinValRef<T>(offset)) = -2147483647;
312 else if (newval > 2147483647)
313 (getBinValRef<T>(offset)) = 2147483647;
315 assert(getBinContent(bin)==newval);
316 assert(getBinContent(bin)==expectedval);
317 FLEXBINCHUNK_TESTMOVEALLBINS(newarrsize);
320 //Do we need to move to short stage?
321 if (stage!=0x2&&(newval>UCHAR_MAX)) {
323 newarrsize = changeBinStage<uint8_t,uint16_t>(bin,offset);
326 newarrsize = changeBinStage<uint16_t,uint16_t>(bin,offset);
332 (getBinValRef<uint16_t>(offset)) = static_cast<uint16_t>(newval);
333 assert(getBinContent(bin)==newval);
334 assert(getBinContent(bin)==expectedval);
335 FLEXBINCHUNK_TESTMOVEALLBINS(newarrsize);
339 assert(stage==0x1||stage==0x0);
342 assert(getBinContent(bin)==newval);
343 assert(getBinContent(bin)==expectedval);
344 FLEXBINCHUNK_TESTMOVEALLBINS(newarrsize);
347 newarrsize = changeBinStage<uint8_t,uint8_t>(bin,offset);
350 (getBinValRef<uint8_t>(offset)) = static_cast<uint8_t>(newval);
351 assert(getBinContent(bin)==expectedval);
352 assert(getBinContent(bin)==newval);
353 assert(std::abs(getBinContent(bin)-expectedval)<1.0e-5);
354 FLEXBINCHUNK_TESTMOVEALLBINS(newarrsize);
// Floating-point T path: promote to the full stage if needed and accumulate.
357 unsigned newarrsize(0);
359 newarrsize = moveToFullStage(bin,stage,offset);
360 //stage = 0x3;//not needed...
362 assert(getStage(bin)==0x3);
363 (getBinValRef<T>(offset)) += static_cast<T>(weight);
364 assert(std::abs(getBinContent(bin)-expectedval)<1.0e-5);
365 FLEXBINCHUNK_TESTMOVEALLBINS(newarrsize);
369 //____________________________________________________________________
// Return the content of 'bin', read through whichever type the bin's stage
// currently stores. NOTE(review): the switch header and the empty-stage
// (0x0) case fall in lines missing from this excerpt (384, 386, 389+).
371 T FlexBinChunk<T>::getBinContent(unsigned bin) const
373 assert(bin<FLEXBINCHUNK_NBINS);
374 #if FLEXBINCHUNK_CONVERTALLTHRESHOLD != FLEXBINCHUNK_NBINS
375 if (m_stages==FLEXBINCHUNK_ALLFULLSTAGE) {
376 //Special case: all bins in full mode. Deal with this fast.
377 // cppcheck-suppress invalidPointerCast
378 return ((reinterpret_cast<T*>(m_data))[bin]);
382 const unsigned offset = calcOffset(bin);
383 const unsigned stage = getStage(bin);
385 case 0x3: return (getBinVal<T>(offset));
387 case 0x1: return (getBinVal<uint8_t>(offset));
388 case 0x2: return (getBinVal<uint16_t>(offset));
395 //____________________________________________________________________
// Set the content of 'bin' to 'val', choosing the narrowest storage stage
// that can represent it: for non-negative integer values <= USHRT_MAX the
// bin stays in (or moves to) the uint8_t/uint16_t stages, otherwise it is
// promoted to the full stage before the assignment.
// NOTE(review): the branching structure (if/else headers, braces, returns)
// falls partly in lines missing from this excerpt (399, 401, 407-408, 410,
// 414-415, 418-419, 421, 423, 428-429, 432, 435-436, 438, 442-444, ...);
// code kept byte-identical.
397 // Disable ubsan to turn off unaligned access warnings.
398 void FlexBinChunk<T>::setBinContent NO_SANITIZE_UNDEFINED (unsigned bin, const T& val)
400 assert(bin<FLEXBINCHUNK_NBINS);
402 #if FLEXBINCHUNK_CONVERTALLTHRESHOLD != FLEXBINCHUNK_NBINS
403 if (m_stages==FLEXBINCHUNK_ALLFULLSTAGE) {
404 //Special case: all bins in full mode. Deal with this fast.
405 // cppcheck-suppress invalidPointerCast
406 ((reinterpret_cast<T*>(m_data))[bin]) = val;
409 //Fixme/todo: for is_integer we could test for all-char-mode and all-short-mode also
411 const unsigned offset = calcOffset(bin);
412 unsigned stage = getStage(bin);
413 unsigned newarrsize(0);
416 if (std::numeric_limits<T>::is_integer&&val>=0&&val<=USHRT_MAX) {
417 //In case of integers we might not need to move to the full stage:
// Value needs (at most) the uint16_t stage:
420 unsigned newarrsize(0);
422 newarrsize = changeBinStage<uint16_t,uint16_t>(bin,offset);
424 newarrsize = changeBinStage<uint8_t,uint16_t>(bin,offset);
425 assert(getStage(bin)==0x2);
426 (getBinValRef<uint16_t>(offset)) = static_cast<uint16_t>(val);
427 FLEXBINCHUNK_TESTMOVEALLBINS(newarrsize);
430 assert(val<=UCHAR_MAX);
431 //needs stage 0x1 (or stage 0x2)
433 (getBinValRef<uint16_t>(offset)) = static_cast<uint16_t>(val);
434 FLEXBINCHUNK_TESTMOVEALLBINS(newarrsize);
437 unsigned newarrsize(0);
439 newarrsize = changeBinStage<uint8_t,uint8_t>(bin,offset);
440 (getBinValRef<uint8_t>(offset)) = static_cast<uint8_t>(val);
441 FLEXBINCHUNK_TESTMOVEALLBINS(newarrsize);
445 assert(false);//should have returned
// Fallback: value requires the full type T.
447 newarrsize = moveToFullStage(bin,stage,offset);
450 assert(getStage(bin)==0x3);
451 (getBinValRef<T>(offset)) = val;
452 FLEXBINCHUNK_TESTMOVEALLBINS(newarrsize);
455 //____________________________________________________________________
// Write the 2-bit stage code for 'bin' into m_stages. Bin 0 occupies the
// two most significant bits (hence the shift). The entry assert enforces
// that a bin's stage never shrinks.
457 void FlexBinChunk<T>::setStage(unsigned bin, unsigned stage/*0x0, 0x1, 0x2 or 0x3*/)
459 assert(getStage(bin)<=stage);
460 const unsigned shift(FLEXBINCHUNK_NBINS*2-(bin+1)*2);
462 //set the two target bits to zero:
463 m_stages &= (0xFFFFFFFF^(0x3<<shift));
465 //set the two target bits to their target values:
466 m_stages |= (stage<<shift);
469 assert(getStage(bin)==stage);
472 //____________________________________________________________________
// Copy all bin contents into 'cont' (assumed already zeroed), walking the
// packed m_data array once. Fast memcpy/loop paths handle the homogeneous
// all-full/all-char/all-short chunks.
// NOTE(review): the declaration of the running 'offset' accumulator and the
// early returns fall in lines missing from this excerpt.
474 void FlexBinChunk<T>::copyContents(T*cont) const
476 //Assumes cont is nulled out.
478 //In case of integers we might have avoided allocations:
479 if (std::numeric_limits<T>::is_integer&&!m_stages)
483 #if FLEXBINCHUNK_CONVERTALLTHRESHOLD != FLEXBINCHUNK_NBINS
484 if (m_stages==FLEXBINCHUNK_ALLFULLSTAGE) {
485 memcpy(cont,m_data,FLEXBINCHUNK_NBINS*sizeof(T));
488 if (m_stages==FLEXBINCHUNK_ALLCHARSTAGE) {
489 for (unsigned ibin=0;ibin<FLEXBINCHUNK_NBINS;++ibin)
490 cont[ibin] = reinterpret_cast<uint8_t*>(m_data)[ibin];
493 if (m_stages==FLEXBINCHUNK_ALLSHORTSTAGE) {
494 for (unsigned ibin=0;ibin<FLEXBINCHUNK_NBINS;++ibin)
495 cont[ibin] = reinterpret_cast<uint16_t*>(m_data)[ibin];
// Restrict the walk to [firstbin, lastbinplusone): bins outside the range
// spanned by the MSB/LSB of m_stages have all-zero stage bits, i.e. empty.
501 const uint32_t msb = LWHistBitUtils::posMSB(m_stages);
502 const unsigned firstbin=FLEXBINCHUNK_NBINS-((msb>>1)+(msb&0x1));//parantheses is (msb/2+msb%2)
503 const uint32_t lsb = LWHistBitUtils::posLSB(m_stages);
504 const unsigned lastbinplusone=(1+FLEXBINCHUNK_NBINS)-((lsb>>1)+(lsb&0x1));//parantheses is (lsb/2+lsb%2)
505 for (unsigned bin=firstbin;bin<lastbinplusone;++bin) {
506 switch (getStage(bin)) {
507 case 0x0: break;//Don't do cont[bin] = 0 since we assume cont to already be nulled out.
508 case 0x3: cont[bin] = (getBinVal<T>(offset)); offset += sizeof(T); break;
509 case 0x1: cont[bin] = (getBinVal<uint8_t>(offset)); offset += sizeof(uint8_t); break;
510 case 0x2: cont[bin] = (getBinVal<uint16_t>(offset)); offset += sizeof(uint16_t); break;
518 //____________________________________________________________________
// Sum all bin contents into a double, mirroring copyContents' single walk
// over the packed data (same MSB/LSB range restriction).
// NOTE(review): the declarations of the 'result' and 'offset' locals, the
// empty-stage case and the final return fall in lines missing from this
// excerpt.
520 inline double FlexBinChunk<T>::Integral() const
522 //In case of integers we might have avoided allocations:
523 if (std::numeric_limits<T>::is_integer&&!m_stages)
528 const uint32_t msb = LWHistBitUtils::posMSB(m_stages);
529 const unsigned firstbin=FLEXBINCHUNK_NBINS-((msb>>1)+(msb&0x1));//parantheses is (msb/2+msb%2)
530 const uint32_t lsb = LWHistBitUtils::posLSB(m_stages);
531 const unsigned lastbinplusone=(1+FLEXBINCHUNK_NBINS)-((lsb>>1)+(lsb&0x1));//parantheses is (lsb/2+lsb%2)
532 for (unsigned bin=firstbin;bin<lastbinplusone;++bin) {
533 switch (getStage(bin)) {
535 case 0x3: result += (getBinVal<T>(offset)); offset += sizeof(T); break;
536 case 0x1: result += (getBinVal<uint8_t>(offset)); offset += sizeof(uint8_t); break;
537 case 0x2: result += (getBinVal<uint16_t>(offset)); offset += sizeof(uint16_t); break;
546 #if FLEXBINCHUNK_CONVERTALLTHRESHOLD != FLEXBINCHUNK_NBINS
547 //____________________________________________________________________
// Heuristic compaction: once the packed allocation is large enough AND the
// last bin is non-empty (so all 16 bins look likely to be used), convert the
// whole chunk to a homogeneous stage so the fast all-* code paths apply.
// 'arrsize' must equal the current allocation size (asserted below).
549 void FlexBinChunk<T>::possibleMoveAllBins(unsigned arrsize)
552 //We only do this for chunks where we know all 16 bins will potentially be used => require last bin non-empty.
553 if ((arrsize<=(FLEXBINCHUNK_CONVERTALLTHRESHOLD*sizeof(uint8_t)))||!(m_stages&0x00000003))
555 assert(arrsize==LWHistBitUtils::totalSummedOffsetInStages<sizeof(T)>(m_stages));
556 if (arrsize>FLEXBINCHUNK_NBINS*sizeof(uint16_t)) {
557 if (arrsize>FLEXBINCHUNK_CONVERTALLTHRESHOLD*sizeof(T)&&arrsize!=FLEXBINCHUNK_NBINS*sizeof(T)) {
//Move all bins to the full stage 0x3 (NOTE(review): pre-existing comment
//said "stage 0x1", but moveAllBinsToStage<T> targets the full stage):
559 moveAllBinsToStage<T>(arrsize);
561 } else if (arrsize>FLEXBINCHUNK_NBINS*sizeof(uint8_t)) {
562 if (arrsize>FLEXBINCHUNK_CONVERTALLTHRESHOLD*sizeof(uint16_t)&&arrsize!=FLEXBINCHUNK_NBINS*sizeof(uint16_t)) {
563 //If there are no stages set to 0x3 we can move all bins to stage 0x2:
564 for (unsigned ibin=0;ibin<FLEXBINCHUNK_NBINS;++ibin)
565 if (getStage(ibin)==0x3)
567 moveAllBinsToStage<uint16_t>(arrsize);
570 assert(arrsize>FLEXBINCHUNK_CONVERTALLTHRESHOLD*sizeof(uint8_t));
571 if (arrsize!=FLEXBINCHUNK_NBINS*sizeof(uint8_t)) {
572 //If there are no stages set to 0x2 or 0x3 we can move all bins to stage 0x1:
// 0xAAAAAAAA masks the high bit of every 2-bit stage field, i.e. any bin in
// stage 0x2 or 0x3:
573 if (!(m_stages&0xAAAAAAAA))
574 moveAllBinsToStage<uint8_t>(arrsize);
578 //____________________________________________________________________
// Repack every bin into a homogeneous FLEXBINCHUNK_NBINS*sizeof(Tnew) array
// and set m_stages to the corresponding all-* pattern, releasing the old
// mixed-stage allocation of 'oldallocsize' bytes.
// NOTE(review): the assignment of 'newdata' into m_data is not visible here
// (original numbering jumps 613 -> 615) — presumably in a missing line;
// confirm against the full file. The enclosing template<class T> header
// (original 579) is also not visible.
580 template <class Tnew>
581 void FlexBinChunk<T>::moveAllBinsToStage(uint16_t oldallocsize)
// Sanity: no bin may already be in a wider stage than the target, and at
// least one bin must actually change stage.
584 for (unsigned ibin=0;ibin<FLEXBINCHUNK_NBINS;++ibin)
585 assert(getStage(ibin)<=FlexBinChunk_stageByType<Tnew>());
586 bool moveatleastonebin(false);
587 for (unsigned ibin=0;ibin<FLEXBINCHUNK_NBINS;++ibin)
588 if (getStage(ibin)<FlexBinChunk_stageByType<Tnew>())
589 moveatleastonebin = true;
590 assert(moveatleastonebin);
592 char * newdata = LWPools::acquire(FLEXBINCHUNK_NBINS*sizeof(Tnew));
// Target is the full stage (Tnew==T): a plain per-bin copy suffices.
593 if (sizeof(Tnew)==sizeof(T)) {
594 assert(m_stages!=FLEXBINCHUNK_ALLFULLSTAGE);
595 for (unsigned ibin=0;ibin<FLEXBINCHUNK_NBINS;++ibin)
596 reinterpret_cast<Tnew*>(newdata)[ibin]=static_cast<Tnew>(getBinContent(ibin));
597 m_stages = FLEXBINCHUNK_ALLFULLSTAGE;
599 //Note that getBinContent might return a float-type!
600 assert(sizeof(Tnew)==sizeof(uint8_t)||sizeof(Tnew)==sizeof(uint16_t));
601 for (unsigned ibin=0;ibin<FLEXBINCHUNK_NBINS;++ibin) {
602 if (std::numeric_limits<T>::is_integer) {
603 assert(getBinContent(ibin)>=0);
604 assert(getBinContent(ibin)<=(sizeof(Tnew)==sizeof(uint8_t)?UCHAR_MAX:USHRT_MAX));
605 reinterpret_cast<Tnew*>(newdata)[ibin] = static_cast<Tnew>(getBinContent(ibin));
// Floating-point T: round to nearest via +0.5 before narrowing.
607 assert(static_cast<int>(getBinContent(ibin)+0.5)>=0);
608 assert(static_cast<int>(getBinContent(ibin)+0.5)<=(sizeof(Tnew)==sizeof(uint8_t)?UCHAR_MAX:USHRT_MAX));
609 reinterpret_cast<Tnew*>(newdata)[ibin]= static_cast<Tnew>(getBinContent(ibin)+0.5);
612 assert(m_stages!=(sizeof(Tnew)==sizeof(uint8_t) ? FLEXBINCHUNK_ALLCHARSTAGE : FLEXBINCHUNK_ALLSHORTSTAGE));
613 m_stages = (sizeof(Tnew)==sizeof(uint8_t) ? FLEXBINCHUNK_ALLCHARSTAGE : FLEXBINCHUNK_ALLSHORTSTAGE);
615 LWPools::release(m_data,oldallocsize);