// If clusters have been split due to bad strips, it would require a whole lot of new
// logic to recalculate the hitsInThirdTimeBin word. Instead, just detect when this is
// the case here and set hitsInThirdTimeBin to zero later on — recalculating it when
// clusters have been split is too difficult to be worthwhile for this rare corner case.
312 {
314
316
317 SCT_ClusterCollection* nullResult(nullptr);
318 if (collection.empty()) {
320 return nullResult;
321 }
322
323
324 std::vector<const SCT_RDORawData*> collectionCopy(collection.begin(), collection.end());
325 if (collection.size() not_eq 1)
std::sort(collectionCopy.begin(), collectionCopy.end(), strip_less_than());
326
327
328
329 cache.currentVector.clear();
330 cache.idGroups.clear();
331 cache.tbinGroups.clear();
332 int n01X(0);
333 int n11X(0);
334 unsigned int previousStrip(0);
336 int stripCount(0);
337 for (const SCT_RDORawData* pRawData: collectionCopy) {
338 const Identifier firstStripId(pRawData->identify());
339 const unsigned int nStrips(pRawData->getGroupSize());
340 const int thisStrip(idHelper.
strip(firstStripId));
343
344
345 if (not
adjacent(thisStrip, previousStrip) and not(cache.currentVector.empty())) {
347 if (n01X >= n11X) {
348 cache.idGroups.push_back(cache.currentVector);
349 }
350 } else {
351
352 cache.idGroups.push_back(cache.currentVector);
353 }
354 cache.currentVector.clear();
355 n01X=0;
356 n11X=0;
357 cache.tbinGroups.push_back(hitsInThirdTimeBin);
358 hitsInThirdTimeBin =0;
359 stripCount = 0;
360 }
361
362
363 bool passTiming(true);
364 bool pass01X(true);
365 bool passX1X(true);
366 const SCT3_RawData* pRawData3(dynamic_cast<const SCT3_RawData*>(pRawData));
367 if (!pRawData3) {
368 ATH_MSG_ERROR(
"Casting into SCT3_RawData failed. This is probably caused by use of an old RDO file.");
369 return nullptr;
370 }
371 const int timeBin(pRawData3->getTimeBin());
372 std::bitset<3> timePattern(static_cast<unsigned long>(timeBin));
374
377 if (pass01X) n01X++;
378 if (passX1X and (not pass01X)) n11X++;
380 if ((BEC==0) and (layer==0) and passX1X) passTiming=true;
381 else passTiming = pass01X;
383 if ((BEC==0) and (layer==0 or layer==1) and passX1X) passTiming=true;
384 else passTiming = pass01X;
385 }
386
387
388
394 } else {
396 }
397 for (
unsigned int iStrip=0; iStrip<
nStrips; iStrip++) {
398 if (stripCount < 16) hitsInThirdTimeBin |= (timePattern.test(0) << stripCount);
399 stripCount++;
400 }
401 }
402 if (not cache.currentVector.empty()) {
403
404 previousStrip = idHelper.
strip(cache.currentVector.back());
405 }
406 }
407
408
409 if (not cache.currentVector.empty()) {
411 cache.idGroups.push_back(cache.currentVector);
412 cache.tbinGroups.push_back(hitsInThirdTimeBin);
413 hitsInThirdTimeBin=0;
414 }
415 }
416
417
418 const Identifier elementID(collection.identify());
419 const Identifier waferId{idHelper.
wafer_id(elementID)};
420 const IdentifierHash waferHash{idHelper.
wafer_hash(waferId)};
421 SG::ReadCondHandle<InDetDD::SiDetectorElementCollection> sctDetEleHandle(
m_SCTDetEleCollKey, ctx);
422 const InDetDD::SiDetectorElementCollection* sctDetEle(*sctDetEleHandle);
423 if (not sctDetEleHandle.isValid() or sctDetEle==nullptr) {
425 return nullResult;
426 }
427 const InDetDD::SiDetectorElement* element(sctDetEle->getDetectorElement(waferHash));
428 if (!element) {
430 return nullResult;
431 }
432
433 const InDetDD::SCT_ModuleSideDesign* design;
435 design = (static_cast<const InDetDD::SCT_BarrelModuleSideDesign*>(&element->design()));
436 } else {
437 design = (static_cast<const InDetDD::SCT_ForwardModuleSideDesign*>(&element->design()));
438 }
439
440 IdentifierHash idHash(collection.identifyHash());
441 SCT_ClusterCollection* clusterCollection = new SCT_ClusterCollection(idHash);
442 clusterCollection->setIdentifier(elementID);
443 clusterCollection->reserve(cache.idGroups.size());
444
445 if(dataItemsPool){
447 }
448
449
450 std::vector<uint16_t>::iterator tbinIter(cache.tbinGroups.begin());
451
455 const bool badStripInClusterOnThisModuleSide = (cache.idGroups.size() != cache.tbinGroups.size());
456
457 for (
IdVec_t& stripGroup: cache.idGroups) {
458 const int nStrips(stripGroup.size());
459 if (nStrips == 0) continue;
460
461 const InDetDD::SiLocalPosition dummyPos(1, 0);
465 const Amg::Vector2D localPos(clusterDim.centre.xPhi(), clusterDim.centre.xEta());
466
467
468 const Identifier clusterId(stripGroup.front());
469 if (!clusterId.is_valid())
ATH_MSG_VERBOSE(clusterId <<
" is invalid.");
470
471
472 const std::pair<InDetDD::SiLocalPosition, InDetDD::SiLocalPosition> ends(design->
endsOfStrip(clusterDim.centre));
473 const double stripLength(std::abs(ends.first.xEta()-ends.second.xEta()));
474
475
477
479 if (dataItemsPool){
481 }else{
483 }
484
485 (*cluster) =
487 ? (
m_clusterMaker->sctCluster(clusterId, localPos, std::move(stripGroup),
489 : (
SCT_Cluster(clusterId, localPos, std::move(stripGroup), siWidth, element,
490 {}));
491
492 cluster->setHashAndIndex(clusterCollection->identifyHash(),
493 clusterCollection->size());
494 if (tbinIter != cache.tbinGroups.end()) {
495 cluster->setHitsInThirdTimeBin(*tbinIter);
496 ++tbinIter;
497 }
499 if (badStripInClusterOnThisModuleSide) cluster->setHitsInThirdTimeBin(0);
500 clusterCollection->push_back(cluster);
501 }
502
503 return clusterCollection;
504 }
#define ATH_MSG_VERBOSE(x)
#define ATH_MSG_WARNING(x)
pointer nextElementPtr()
obtain the next available element in pool by pointer pool is resized if its limit has been reached On...
virtual std::pair< SiLocalPosition, SiLocalPosition > endsOfStrip(const SiLocalPosition &position) const override=0
give the ends of strips
int layer_disk(const Identifier &id) const
int barrel_ec(const Identifier &id) const
Values of different levels (failure returns 0)
bool is_barrel(const Identifier &id) const
Test for barrel - WARNING: id MUST be sct id, otherwise answer is not accurate. Use SiliconID for gen...
Eigen::Matrix< double, 2, 1 > Vector2D
bool adjacent(unsigned int strip1, unsigned int strip2)
@ VIEW_ELEMENTS
this data object is a view, it does not own its elmts
void sort(typename DataModel_detail::iterator< DVL > beg, typename DataModel_detail::iterator< DVL > end)
Specialization of sort for DataVector/List.