Copyright (C) 2002-2025 CERN for the benefit of the ATLAS collaboration
#include "DefectsEmulatorCondAlgImpl.h"
#include "Identifier/Identifier.h"
#include "AthenaKernel/RNGWrapper.h"
#include "StoreGate/WriteHandle.h"
#include <type_traits>
#include "CLHEP/Random/RandFlat.h"
#include "ModuleIdentifierMatchUtil.h"
#include "ConnectedModulesUtil.h"
#include "EmulatedDefectsBuilder.h"
#include "nlohmann/json.hpp"
#include "TVirtualRWMutex.h"
#include "ROOT/RNTupleWriter.hxx"
#include "ROOT/RNTupleReader.hxx"
#include "ROOT/RNTupleModel.hxx"
template <typename T_ID>
concept IdentifierHelperWithOtherSideConcept = requires(T_ID id_helper) { id_helper.get_other_side(); };
template <class T_ModuleHelper>
unsigned int getSensorColumn(const T_ModuleHelper &helper, unsigned int cell_idx) {
return cell_idx % helper.nSensorColumns();
template <class T_ModuleHelper>
unsigned int getSensorRow(const T_ModuleHelper &helper, unsigned int cell_idx) {
return cell_idx / helper.nSensorColumns();
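// Map a uniformly chosen cell index onto a checkerboard-like subset of the sensor: the sensor is
// divided into roughly 7 blocks of rows and 7 blocks of columns, the block index is rounded down to
// an even value ( & (0xffff-1) clears the lowest bit) and, depending on the sensor half and the
// odd-row/odd-column toggles, shifted by one block, so that defects can only land on alternating
// blocks. The result is clamped to the sensor dimensions.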
unsigned int makeCheckerboard(unsigned int cell_idx, unsigned int n_rows, unsigned int n_columns,
unsigned int row_i=cell_idx / n_columns;
unsigned int col_i = cell_idx % n_columns;
unsigned int row_odd = row_i < n_rows/2 || !odd_row_toggle ? 1 : 0;
unsigned int div_row = n_rows/7;
unsigned int div_cols = n_columns/7;
row_i = std::min((((row_i / div_row ) & (0xffff-1)) + row_odd)*div_row + (row_i%div_row), n_rows-1);
unsigned int col_odd = col_i < n_columns/2 || !odd_col_toggle ? 1 : 0;
col_i = std::min((((col_i / div_cols ) & (0xffff-1)) + col_odd)*div_cols + (col_i%div_cols),n_columns-1);
cell_idx= row_i * n_columns + (col_i % n_columns);
/** helper to lock a mutex and consistently unlock it when leaving the scope.
* similar to std::lock_guard, but the locking can be skipped when it is not needed for the scope
MyLockGuard(std::mutex &a_mutex, bool enable_locking)
: m_mutex( enable_locking ? &a_mutex : nullptr)
if (m_mutex) {m_mutex->lock(); }
if (m_mutex) {m_mutex->unlock(); }
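// Usage sketch (illustrative): guard a scope only when histogramming is enabled, e.g.
//   MyLockGuard lock(m_histMutex, m_histogrammingEnabled);
// locks m_histMutex on construction if the flag is set and unlocks it when the guard goes out of
// scope; otherwise the guard is a no-op.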
template <class T_ModuleHelper>
void histogramDefects(const T_ModuleHelper &helper,
const typename std::pair<typename T_ModuleHelper::KEY_TYPE,typename T_ModuleHelper::KEY_TYPE> &key,
std::array<unsigned int,4> ranges_row_col = helper.offlineRange(key);
for (unsigned int row_i=ranges_row_col[0]; row_i<ranges_row_col[1]; ++row_i) {
for (unsigned int col_i=ranges_row_col[2]; col_i<ranges_row_col[3]; ++col_i) {
h2->Fill(col_i, row_i);
// create an array containing all possible combinations of N numbers chosen from 0 to M-1.
// The N numbers are unique within a combination, their order has no significance, and each combination is packed into a single bit mask.
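// For example, with M=4 and N=2 this should yield the six pairs {0,1},{0,2},{0,3},{1,2},{1,3},{2,3},
// packed as the bit masks 0x3, 0x5, 0x9, 0x6, 0xa, 0xc (in some order); entries of the returned
// array beyond the number of combinations stay zero.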
template <typename T, std::size_t N_MAX, std::size_t M>
constexpr std::array<T, N_MAX> packCombinations(unsigned int N) {
std::array<T, N_MAX> packed_permutations{};
std::array<unsigned int,M> last_val;
for (unsigned int i=0; i<N; ++i) {
for (unsigned int i=0; i<N; ++i) {
packed_permutations[idx]|= static_cast<T>(1u<<last_val[i]);
if (++last_val[i]<=M-(N-i)) {
last_val[i]=last_val[i-1]+1;
return packed_permutations;
// count the number of elements up to the first element which is zero, ignoring a zero in the first element.
template <typename T, std::size_t N_MAX>
constexpr unsigned int findLastNonEmpty(const std::array<T, N_MAX> &arr) {
while (++i<arr.size() && arr[i]!=0);
// create an array which contains the number of valid combinations for each array of combinations.
template <typename T, typename T2, std::size_t N_MAX, std::size_t N>
constexpr std::array<unsigned char,N> makeCombinationCounts(const std::array<std::array<T2, N_MAX>, N> &arr) {
std::array<unsigned char,N> ret;
for (const std::array<T2, N_MAX> &sub_arr : arr) {
ret[idx++]=static_cast<T>(findLastNonEmpty(sub_arr));
// create the possible combinations of defect corners for 1 to 4 defect corners
static constexpr std::array< std::array<unsigned char,6>, 4> corner_combinations {
packCombinations<unsigned char,6,4>(1),
packCombinations<unsigned char,6,4>(2),
packCombinations<unsigned char,6,4>(3),
packCombinations<unsigned char,6,4>(4),
// count the number of possible combinations for each entry of corner_combinations.
static constexpr std::array<unsigned char,4> n_corner_combinations (makeCombinationCounts<unsigned char>(corner_combinations) );
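// For four corners these counts should be the binomial coefficients 4 choose 1 = 4, 4 choose 2 = 6,
// 4 choose 3 = 4 and 4 choose 4 = 1, i.e. the number of valid entries per row of corner_combinations.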
template <typename T>
bool hasExtensions(const std::string &name, const std::string_view &ext) {
return name.size()>=ext.size() && name.substr(name.size()-ext.size(),ext.size())==ext;
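// Dump the emulated defects to a JSON file: one object per module with defects, keyed by the string
// representation of the module hash, e.g. (with illustrative hash values)
//   { "42": { "isDefect": true }, "43": { "defects": [ <packed defect keys> ] } }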
template <typename T_EmulatedDefects>
void toJson(const T_EmulatedDefects &defects, const std::string &name) {
for (unsigned int id_hash = 0; id_hash < defects.size(); ++id_hash) {
if (defects.isModuleDefect(id_hash) || !defects[id_hash].empty()) {
nlohmann::json module_data;
if (defects.isModuleDefect(id_hash)) {
module_data["isDefect"]=true;
module_data["defects"]=defects[id_hash];
std::stringstream id_hash_name;
id_hash_name << id_hash;
data[id_hash_name.str()]=module_data;
std::ofstream out(name.c_str());
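// Dump the emulated defects to a ROOT RNTuple named "defects", presumably one entry per module hash,
// with the fields "isDefect" (flag for fully defect modules) and "defects" (the vector of packed
// defect keys). The reading counterpart is the RootAdapter below.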
template <typename T_EmulatedDefects>
void toRoot(const T_EmulatedDefects &defects, const std::string &name) {
ROOT::TWriteLockGuard lock (ROOT::gCoreMutex);
auto model=ROOT::RNTupleModel::Create();
auto nt_defects=model->MakeField< std::vector<typename T_EmulatedDefects::KEY_TYPE> >("defects");
auto nt_is_defect=model->MakeField<bool>("isDefect");
ROOT::RNTupleWriteOptions options;
options.SetCompression(ROOT::RCompressionSetting::EAlgorithm::kZSTD, ROOT::RCompressionSetting::ELevel::kDefaultZSTD);
auto writer = ROOT::RNTupleWriter::Recreate(std::move(model), "defects", name.c_str(), options);
for (unsigned int id_hash = 0; id_hash < defects.size(); ++id_hash) {
if (defects.isModuleDefect(id_hash)) {
*nt_is_defect =false;
*nt_defects = defects[id_hash];
template <typename KEY_TYPE>
bool open(const std::string &name) {
std::ifstream input_file(name);
input_file >> m_jsDefects;
return m_jsDefects.items();
template <typename T>
static std::size_t moduleHash(const T &element) {
return strtol(element.key().c_str(),&ptr, 10);
template <typename T>
static nlohmann::json value(const T &element) {
return element.value();
static bool isDefect(const nlohmann::json &value) {
return value["isDefect"];
static std::vector<KEY_TYPE> moduleDefects(const nlohmann::json &value) {
return value["defects"];
nlohmann::json m_jsDefects;
template <typename KEY_TYPE>
bool open(const std::string &name) {
auto model=ROOT::RNTupleModel::Create();
m_defects=model->MakeField< std::vector<KEY_TYPE> >("defects");
m_isDefect=model->MakeField<bool>("isDefect");
m_reader = ROOT::RNTupleReader::Open(std::move(model), "defects", name.c_str());
return m_reader.get() != nullptr;
ROOT::RNTupleReader &items() {
static auto moduleHash( std::size_t entry_i) {
auto value(std::size_t entry_i) {
m_reader->LoadEntry(entry_i);
bool isDefect([[maybe_unused]] std::size_t entry_i) {
const std::vector<KEY_TYPE> &moduleDefects([[maybe_unused]] std::size_t entry_i) {
std::unique_ptr< ROOT::RNTupleReader > m_reader;
std::shared_ptr< bool > m_isDefect;
std::shared_ptr<std::vector<KEY_TYPE> > m_defects;
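// Generic reader for externally provided defects. The adapter (JsonAdapter or RootAdapter above) has
// to provide open(), items(), moduleHash(), value(), isDefect() and moduleDefects(). If a module
// already has defects, the keys read from the file are re-inserted via EmulatedDefectsBuilder so that
// ranges and defect types are merged consistently with the defects that are already present.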
template <typename T_ModuleHelper, typename T_ReaderAdapter, typename T_EmulatedDefects>
void fromFile(const InDetDD::SiDetectorElementCollection &det_ele_coll, T_EmulatedDefects &defects, const std::string &name) {
T_ReaderAdapter reader;
if (!reader.open(name)) {
std::stringstream amsg;
amsg << "Failed to read \"" << name << "\"";
throw std::runtime_error(amsg.str());
for (auto entry_i : reader.items()) {
auto module_i = reader.moduleHash(entry_i);
assert( module_i < det_ele_coll.size());
auto value = reader.value(entry_i);
if (reader.isDefect(value)) {
defects.setModuleDefect(module_i);
else if (!defects.isModuleDefect(module_i)) {
std::vector<typename T_EmulatedDefects::KEY_TYPE> &module_defects = defects.at(module_i);
if (module_defects.empty()) {
module_defects = reader.moduleDefects(value); // move ?
InDetDD::SiDetectorElementCollection::const_value_type det_ele = det_ele_coll[module_i];
T_ModuleHelper helper(det_ele->design());
auto input_defects = reader.moduleDefects(value);
for (typename std::vector<typename T_EmulatedDefects::KEY_TYPE>::const_reverse_iterator iter = input_defects.rbegin();
iter != input_defects.rend();
if (T_ModuleHelper::isRangeKey(*iter)) {
if (iter == input_defects.rend()) {
EmulatedDefectsBuilder::insertKeyRange(helper,
std::make_pair(key, *iter),
T_ModuleHelper::getDefectTypeComponent(key));
if (T_ModuleHelper::getDefectTypeComponent(key) != T_ModuleHelper::getDefectTypeComponent(*iter)) {
auto [end_key_iter, end_iter] = InDet::EmulatedDefects<T_ModuleHelper>::lower_bound(module_defects, *iter);
if (end_key_iter != end_iter && T_ModuleHelper::makeBaseKey(*iter) == T_ModuleHelper::makeBaseKey(*end_key_iter)) {
*end_key_iter |= T_ModuleHelper::getDefectTypeComponent(*iter);
EmulatedDefectsBuilder::insertKey(helper,
T_ModuleHelper::getDefectTypeComponent(key));
template <typename T_ModuleHelper, typename T_EmulatedDefects>
void fromJson(const InDetDD::SiDetectorElementCollection &det_ele_coll, T_EmulatedDefects &defects, const std::string &name) {
fromFile<T_ModuleHelper, JsonAdapter<typename T_ModuleHelper::KEY_TYPE> >(det_ele_coll, defects, name);
template <typename T_ModuleHelper, typename T_EmulatedDefects>
void fromRoot(const InDetDD::SiDetectorElementCollection &det_ele_coll, T_EmulatedDefects &defects, const std::string &name) {
ROOT::TReadLockGuard lock (ROOT::gCoreMutex);
fromFile<T_ModuleHelper, RootAdapter<typename T_ModuleHelper::KEY_TYPE> >(det_ele_coll, defects, name);
template<class T_Derived>
StatusCode DefectsEmulatorCondAlgImpl<T_Derived>::initialize(){
ATH_CHECK(m_detEleCollKey.initialize());
ATH_CHECK(m_writeKey.initialize());
ATH_CHECK(detStore()->retrieve(m_idHelper,derived().IDName()));
if constexpr( T_ModuleHelper::ROW_BITS==0 || T_ModuleHelper::COL_BITS==0) {
if (!m_cornerDefectParamsPerPattern.empty() || !m_cornerDefectNCornerFractionsPerPattern.empty()) {
ATH_MSG_ERROR("Corner defects are not supported in this instance but are configured.");
return StatusCode::FAILURE;
return initializeBase(T_ModuleHelper::nMasks(), m_idHelper->wafer_hash_max());
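// Outline of execute(): check whether the conditions object already exists, read the detector element
// collection, load externally provided defects from the configured input files, then for each module
// randomly generate whole-module defects (optionally propagated to the modules sharing the same
// physical sensor), per-mask defects (e.g. defect chips, core columns, single pixels or strips) and,
// if configured, corner defects; finally fill the optional monitoring histograms, dump the defects to
// JSON or ROOT if requested and record the conditions object.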
template<class T_Derived>
StatusCode DefectsEmulatorCondAlgImpl<T_Derived>::execute(const EventContext& ctx) const {
SG::WriteCondHandle<T_EmulatedDefects> defectsOut(m_writeKey, ctx);
if (defectsOut.isValid()) {
return StatusCode::SUCCESS;
SG::ReadCondHandle<InDetDD::SiDetectorElementCollection> detEleColl(m_detEleCollKey,ctx);
ATH_CHECK(detEleColl.isValid());
defectsOut.addDependency(detEleColl);
std::unique_ptr<T_EmulatedDefects> defects = std::make_unique<T_EmulatedDefects>(*(detEleColl.cptr()));
for (const std::string &input_file : m_inputFiles.value()) {
if (hasExtensions(input_file,".json")) {
fromJson<T_ModuleHelper>(*(detEleColl.cptr()),*defects, input_file);
else if (hasExtensions(input_file,".root")) {
fromRoot<T_ModuleHelper>(*(detEleColl.cptr()), *defects, input_file);
// the last element (hence nMasks()+1 entries) holds the counts for entire modules being defect
std::vector<std::array<unsigned int,kNCounts> > counts(T_ModuleHelper::nMasks()+1, std::array<unsigned int,kNCounts>{});
std::size_t n_error=0u;
unsigned int n_defects_total=0;
unsigned int module_without_matching_pattern=0u;
auto connected_modules = derived().getModuleConnectionMap(*(detEleColl.cptr()));
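// One random number engine per defect type (module defects, corner defects and one per mask) when
// m_rngPerDefectType is set; otherwise the same engine is used for all defect types.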
std::array<CLHEP::HepRandomEngine *, kMaskDefects + T_ModuleHelper::nMasks() > rndmEngine;
if (this->m_rngPerDefectType) {
assert (m_rngName.size() == rndmEngine.size());
for (const std::string &a_rngName : m_rngName) {
ATHRNG::RNGWrapper* rngWrapper = m_rndmSvc->getEngine(this, a_rngName );
rngWrapper->setSeed( a_rngName, ctx );
assert( idx < rndmEngine.size());
rndmEngine[idx++]=rngWrapper->getEngine(ctx);
assert( !m_rngName.empty());
ATHRNG::RNGWrapper* rngWrapper = m_rndmSvc->getEngine(this, m_rngName[0]);
rngWrapper->setSeed( m_rngName[0], ctx );
CLHEP::HepRandomEngine *a_rndmEngine = rngWrapper->getEngine(ctx);
for (CLHEP::HepRandomEngine *&elm : rndmEngine) {
ModuleIdentifierMatchUtil::ModuleData_t module_data;
std::vector<unsigned int> module_pattern_idx;
module_pattern_idx.reserve( m_modulePattern.size() );
std::vector<double> cumulative_prob_dist;
cumulative_prob_dist.reserve( m_modulePattern.size() );
std::vector<unsigned int> n_mask_defects;
n_mask_defects.reserve( T_ModuleHelper::nMasks());
std::size_t det_ele_sz = detEleColl->size();
for (unsigned int module_i=0u; module_i < det_ele_sz; ++module_i) {
typename T_DetectorElementCollection::const_value_type det_ele = (*detEleColl.cptr())[module_i];
if (defects->isModuleDefect(module_i)) continue;
if (derived().isModuleDefect(ctx, module_i)) {
defects->setModuleDefect(module_i);
if (m_histogrammingEnabled && !m_moduleHist.empty()) {
// Always fill externally provided defects as if the module matched the first pattern,
// since there is no pattern associated with the external defects.
std::lock_guard<std::mutex> lock(m_histMutex);
histogramDefectModule(0, 0, module_i, det_ele->center());
T_ModuleHelper helper(det_ele->design());
++(counts.back()[ kNElements ]); // module defects
// find pattern matching this module
const T_ModuleDesign &moduleDesign = dynamic_cast<const T_ModuleDesign &>(det_ele->design());
ModuleIdentifierMatchUtil::setModuleData(*m_idHelper,
ModuleIdentifierMatchUtil::moduleMatches(m_modulePattern.value(), module_data, module_pattern_idx);
if (module_pattern_idx.empty()) {
++module_without_matching_pattern;
// it is possible that multiple patterns match a module.
// For module defects, a pattern is selected randomly from all matching patterns. This matters because
// the patterns control whether a defect is propagated to all modules connected to the same physical sensor
makeCumulativeProbabilityDist(module_pattern_idx,kModuleDefectProb, cumulative_prob_dist);
float prob=(cumulative_prob_dist.empty() || cumulative_prob_dist.back()<=0.) ? 1. : CLHEP::RandFlat::shoot(rndmEngine[kModuleDefects],1.);
unsigned int match_i=module_pattern_idx.size();
for (; match_i-->0; ) {
assert( match_i < cumulative_prob_dist.size());
if (prob > cumulative_prob_dist[match_i]) break;
if (match_i < module_pattern_idx.size()) {
unsigned int hist_pattern_i = (m_fillHistogramsPerPattern ? module_pattern_idx[match_i] : 0 );
// mark entire module as defect
(counts.back()[ kNDefects ])
+= 1-defects->isModuleDefect(module_i); // do not double count
defects->setModuleDefect(module_i);
// if configured accordingly, also mark as defect the other "modules" which are
// part of the same physical module
ATH_MSG_VERBOSE( "Add module defect "
<< " hash=" << m_idHelper->wafer_hash(det_ele->identify())
<< " barrel_ec=" << m_idHelper->barrel_ec(det_ele->identify())
<< " layer_disk=" << m_idHelper->layer_disk(det_ele->identify())
<< " phi_module=" << m_idHelper->phi_module(det_ele->identify())
<< " eta_module=" << m_idHelper->eta_module(det_ele->identify())
<< " side=" << ModuleIdentifierMatchUtil::detail::getZeroOrSide(*m_idHelper,det_ele->identify())
<< " columns_strips=" << module_data[4]);
// if histogramming is enabled, lock also for the connected modules
// to avoid frequent locking and unlocking.
// The algorithm should anyway run only once per job.
MyLockGuard lock(m_histMutex, m_histogrammingEnabled);
if (m_histogrammingEnabled) {
histogramDefectModule(module_pattern_idx[match_i], hist_pattern_i, module_i, det_ele->center());
if constexpr(IdentifierHelperWithOtherSideConcept<decltype(*m_idHelper)>) {
// propagate module defects to modules connected to the same physical sensor
// if configured accordingly.
assert( module_pattern_idx[match_i] < m_modulePattern.size());
ConnectedModulesUtil::visitMatchingConnectedModules(
m_modulePattern[module_pattern_idx[match_i]],
*(detEleColl.cptr()),
[defects_ptr=defects.get(),
module_pattern_i=module_pattern_idx[match_i],
this](unsigned int child_id_hash,const InDetDD::SiDetectorElement &connected_det_ele) {
(counts.back()[ kNDefects ])
+= 1-defects_ptr->isModuleDefect(child_id_hash); // do not double count
defects_ptr->setModuleDefect(child_id_hash);
if (m_histogrammingEnabled) {
histogramDefectModule(module_pattern_i, hist_pattern_i, child_id_hash, connected_det_ele.center());
ATH_MSG_VERBOSE( "Propagate module defect to other split modules "
<< " hash=" << m_idHelper->wafer_hash(connected_det_ele.identify())
<< " barrel_ec=" << m_idHelper->barrel_ec(connected_det_ele.identify())
<< " layer_disk=" << m_idHelper->layer_disk(connected_det_ele.identify())
<< " phi_module=" << m_idHelper->phi_module(connected_det_ele.identify())
<< " eta_module=" << m_idHelper->eta_module(connected_det_ele.identify())
<< " side=" << ModuleIdentifierMatchUtil::detail::getZeroOrSide(*m_idHelper, connected_det_ele.identify()) );
unsigned int hist_pattern_i = (m_fillHistogramsPerPattern ? module_pattern_idx.front() : 0 );
// throw the number of defects for all defect types except module defects, which are already handled,
// e.g. chip defects, core-column defects, single pixel or strip defects
unsigned int cells = helper.nCells();
std::vector<typename T_ModuleHelper::KEY_TYPE> &module_defects=(*defects).at(module_i);
module_defects.reserve( throwNumberOfDefects(std::span(&rndmEngine.data()[kMaskDefects], rndmEngine.size()-kMaskDefects),
T_ModuleHelper::nMasks(),
// if histogramming is enabled lock for the entire loop, since it would
// be rather inefficient to lock for each defect pixel or strip separately and
// this algorithm should run only a single time per job anyway.
MyLockGuard lock(m_histMutex, m_histogrammingEnabled);
auto [matrix_histogram_index, matrix_index]=(m_histogrammingEnabled
? findHist(hist_pattern_i, helper.nSensorRows(), helper.nSensorColumns())
: std::make_pair(0u,0u));
// create defects starting from the mask (or group) defect covering the largest area (assuming
// that masks are in ascending order) e.g. defect chips, core-column defects, individual pixel
auto masks = helper.masks();
for (unsigned int mask_i = masks.size(); mask_i-->0; ) {
assert( mask_i < n_mask_defects.size());
counts[mask_i][kNElements] += helper.nElements(mask_i);
unsigned int n_defects = n_mask_defects[mask_i];
// module with mask (or group) defects i.e. core-column defects, defect chips, ...
assert( mask_i < counts.size());
counts[mask_i][kMaxDefectsPerModule] = std::max(counts[mask_i][kMaxDefectsPerModule],n_defects);
unsigned int current_defects = module_defects.size();
assert( !m_histogrammingEnabled || ( hist_pattern_i < m_hist.size() && matrix_histogram_index < m_hist[hist_pattern_i].size()) );
TH2 *h2 = (m_histogrammingEnabled ? m_hist.at(hist_pattern_i).at(matrix_histogram_index) : nullptr);
for (unsigned int defect_i=0; defect_i < n_defects; ++defect_i) {
// retry if a random position has been chosen already, but at most m_maxAttempts times
unsigned int attempt_i=0;
for (attempt_i=0; attempt_i<m_maxAttempts.value(); ++attempt_i) {
// choose a random pixel or strip which defines the defect location
assert(kMaskDefects+mask_i < rndmEngine.size() );
unsigned int cell_idx=CLHEP::RandFlat::shoot(rndmEngine[kMaskDefects+mask_i],cells);
if (m_checkerBoardToggle) {
// restrict defects to a checkerboard pattern
cell_idx=makeCheckerboard(cell_idx,
helper.nSensorRows(),
helper.nSensorColumns(),
m_oddRowToggle.value(),
m_oddColToggle.value() );
std::pair<typename T_ModuleHelper::KEY_TYPE, typename T_ModuleHelper::KEY_TYPE>
key_range( T_ModuleHelper::makeRangeForMask(helper.hardwareCoordinates(getSensorRow(helper, cell_idx),
getSensorColumn(helper, cell_idx)),
if (EmulatedDefectsBuilder::insertKeyRange(helper, module_defects, key_range, T_ModuleHelper::makeDefectTypeKey(mask_i))) {
histogramDefects(helper,key_range,h2);
assert( mask_i+1 < counts.size());
++counts[mask_i][ kNRetries ];
assert( mask_i+1 < counts.size());
counts[mask_i][kNMaxRtriesExceeded] += attempt_i >= m_maxAttempts.value();
unsigned int new_defects = module_defects.size() - current_defects;
assert( mask_i+1 < counts.size());
counts[mask_i][ kNDefects ] += new_defects;
++(counts[mask_i][kNModulesWithDefects]);
// Create corner defects only for modules with columns and rows.
if constexpr( T_ModuleHelper::ROW_BITS>0 && T_ModuleHelper::COL_BITS>0) {
if (!m_cornerDefectParamsPerPattern.empty() && !m_cornerDefectParamsPerPattern[module_pattern_idx.front()].empty()) {
if (module_pattern_idx.size()!=1) {
ATH_MSG_WARNING("Multiple module patterns match module " << module_i
<< ", but for corner defects only the parameters associated with the first pattern are used" );
// create defects at certain corners outside of a circle which crosses the sensor edges at positions
// chosen in a random range. The edge crossings at the particular corner together with a sagitta of a
// value chosen in a random range define the circle.
// throw the number of corners with corner defects.
float defect_prob=CLHEP::RandFlat::shoot(rndmEngine[kCornerDefects],1.);
unsigned int n_corners=m_perPatternCornerDefectNCornerCummulativeProb[module_pattern_idx.front() ].size();
n_corners>0 && defect_prob<m_perPatternCornerDefectNCornerCummulativeProb[module_pattern_idx.front() ][n_corners-1];
unsigned int n_current_defects =module_defects.size();
if (n_corners<m_perPatternCornerDefectNCornerCummulativeProb[module_pattern_idx.front() ].size()) {
assert( !m_histogrammingEnabled || ( hist_pattern_i < m_hist.size() && matrix_histogram_index < m_hist[hist_pattern_i].size()) );
TH2 *h2 = (m_histogrammingEnabled ? m_hist.at(hist_pattern_i).at(matrix_histogram_index) : nullptr);
assert( n_corners < n_corner_combinations.size() );
// choose randomly which corners have defects
unsigned int the_combination =CLHEP::RandFlat::shoot(rndmEngine[kCornerDefects],n_corner_combinations[n_corners] );
unsigned char corner_mask = corner_combinations[n_corners][the_combination];
// columns, rows and their pitches are already given in the direction of the hardware columns and rows
unsigned int hwa_columns = helper.columns();
unsigned int hwa_rows = helper.rows();
float hwa_columnPitch = helper.columnPitch();
float hwa_rowPitch = helper.rowPitch();
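// corner_i enumerates the four sensor corners; scale_rx and scale_ry below select the sign of the
// column- and row-direction offsets for each corner, and col_start / row_range pick the adjacent
// sensor edges, so that the same circle construction can be used for all four corners.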
for (unsigned int corner_i=0; corner_i<4; ++corner_i) {
if (corner_mask & (1u << corner_i)) {
// choose random positions at which the circle crosses the edges at the particular corner
// and choose a random sagitta defined by the crossings.
assert( m_cornerDefectParamsPerPattern[module_pattern_idx.front()].size()==kNCornerDefectParams);
float rx=m_cornerDefectParamsPerPattern[module_pattern_idx.front()][kCornerDefectWidthColumnDirectionOffset]
+CLHEP::RandFlat::shoot(rndmEngine[kCornerDefects],m_cornerDefectParamsPerPattern[module_pattern_idx.front() ][kCornerDefectWidthColumnDirection]);
float ry=m_cornerDefectParamsPerPattern[module_pattern_idx.front() ][kCornerDefectkWidthRowDirectionOffset]
+CLHEP::RandFlat::shoot(rndmEngine[kCornerDefects],m_cornerDefectParamsPerPattern[module_pattern_idx.front() ][kCornerDefectkWidthRowDirection]);
// make sure that the sagitta is small enough that the circle can actually cross the bounds.
// Conditions: the circle radius has to be larger than rx and ry. If the circle radius
// is equal to rx or ry, the circle will just touch the border.
// This leads to quadratic equations x^2 + 2*hp_{1,2}*x + q = 0 solved by x = -hp_{1,2} +- sqrt(hp_{1,2}^2 - q).
float r2=rx*rx+ry*ry;
float hp1=0.5*sqrt(r2)*rx/ry;
float hp2=0.5*sqrt(r2)*ry/rx;
float sagitta_max = std::min( (-hp1 + sqrt( hp1*hp1 -q )),(-hp2 + sqrt( hp2*hp2 -q )) );
float sagitta_range = m_cornerDefectParamsPerPattern[module_pattern_idx.front() ][kCornerDefectkSagitta];
float sagitta_offset = m_cornerDefectParamsPerPattern[module_pattern_idx.front() ][kCornerDefectkSagittaOffset];
sagitta_offset=std::min(sagitta_offset,sagitta_max);
sagitta_range=std::min( sagitta_range, sagitta_max - sagitta_offset);
if (sagitta_range != m_cornerDefectParamsPerPattern[module_pattern_idx.front() ][kCornerDefectkSagitta]
|| sagitta_offset != m_cornerDefectParamsPerPattern[module_pattern_idx.front() ][kCornerDefectkSagittaOffset]) {
ATH_MSG_VERBOSE("limited sagitta range from "
<< m_cornerDefectParamsPerPattern[module_pattern_idx.front() ][kCornerDefectkSagittaOffset]
<< (m_cornerDefectParamsPerPattern[module_pattern_idx.front() ][kCornerDefectkSagittaOffset]
+ m_cornerDefectParamsPerPattern[module_pattern_idx.front() ][kCornerDefectkSagitta])
<< " to " << sagitta_offset << ".." << (sagitta_offset+sagitta_range));
sagitta=sagitta_offset+CLHEP::RandFlat::shoot(rndmEngine[kCornerDefects],sagitta_range);
// compute column range with defects
double scale_rx=(corner_i==1 || corner_i==2) ? -1.f : 1.f;
double scale_ry=(corner_i<=1) ? -1.f : 1.f;
std::array<unsigned int,2> row_range{0,hwa_rows};
unsigned int row_var_idx=(corner_i<=1) ? 0 : 1;
unsigned int col_start=(corner_i==1 || corner_i==2) ? hwa_columns : 0;
Amg::Vector2D corner_pos{col_start*hwa_columnPitch,row_range[row_var_idx^1]*hwa_rowPitch};
unsigned int col_end = std::min(static_cast<unsigned int>(std::max(0,static_cast<int>(col_start)
+ static_cast<int>(rx*scale_rx / hwa_columnPitch +.5))),
if (col_end < col_start) {
std::swap(col_start, col_end);
// compute circle parameters
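// The two edge crossings c0 and c1 form a chord of the circle; rho2 is the squared chord length
// (rx^2 + ry^2) and the usual sagitta relation R = L^2/(8*s) + s/2 yields the radius. The centre Rc
// is then displaced from the chord midpoint by (radius - sagitta) along the chord normal.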
Amg::Vector2D c0 = corner_pos + Amg::Vector2D{0.f,ry*scale_ry};
Amg::Vector2D c1 = corner_pos + Amg::Vector2D{rx*scale_rx,0.f};
double rho2=sqr(rx) + sqr(ry);
double radius = rho2/(8*sagitta)+0.5*sagitta;
Amg::Vector2D Rc = c0+(c1-c0)*.5 - (scale_ry*scale_rx) * (radius-sagitta)
*Amg::Vector2D{-ry*scale_ry,-rx*scale_rx};
double ymin=std::min(c0[1],c1[1]);
double ymax=std::max(c0[1],c1[1]);
for (unsigned int col_i=col_start; col_i< col_end; ++col_i) {
// compute the row range with defects for this column, i.e. the row at which the circle crosses the
// column and the corresponding edge row (top or bottom)
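// cx solves the circle equation (x - Rc[0])^2 + (y - Rc[1])^2 = radius^2 for y at the column
// position x = col_i*hwa_columnPitch; q below collects the constant terms of the resulting
// quadratic in y, and cx and cx2 are the two solutions.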
double q = sqr(col_i*hwa_columnPitch-Rc[0]) + sqr(Rc[1]) - sqr(radius);
double cx = (-ph+sQ );
if (cx<ymin || cx>=ymax) {
double cx2=(-ph-sQ );
// pick the physical solution and make sure that
// the solution remains in the allowed range despite
if (cx2<ymin || cx2 >=ymax) {
double d1=std::min(std::abs(cx-ymin),std::abs(cx-ymax));
double d2=std::min(std::abs(cx2-ymin),std::abs(cx2-ymax));
cx = std::clamp( cx, ymin, ymax - hwa_rowPitch*.25);
if (cx>=ymin && cx<ymax) {
row_range[row_var_idx]=static_cast<unsigned int>( std::max(0.,cx / hwa_rowPitch + .5));
if (row_range[0] < row_range[1] && row_range[0] <hwa_rows) {
// create corner defects for one column
std::pair<typename T_ModuleHelper::KEY_TYPE, typename T_ModuleHelper::KEY_TYPE> key_range{
helper.swapOfflineRowsColumns() ? helper.hardwareCoordinates(col_i, std::min(row_range[0],helper.nSensorColumns()-1u))
: helper.hardwareCoordinates(std::min(row_range[0],helper.nSensorRows()-1u),col_i),
helper.swapOfflineRowsColumns() ? helper.hardwareCoordinates(col_i, std::min(row_range[1]-1, helper.nSensorColumns()-1u))
: helper.hardwareCoordinates(std::min(row_range[1]-1,helper.nSensorRows()-1u),col_i)
if (EmulatedDefectsBuilder::insertKeyRange(helper, module_defects, key_range, T_ModuleHelper::makeDefectTypeKey(masks.size()>1 ? 1 : 0))) {
histogramDefects(helper,key_range,h2);
// add corner defects to random pixel defects for histogramming
n_mask_defects[0] += module_defects.size() - n_current_defects;
if (m_histogrammingEnabled) {
fillPerModuleHistograms(module_pattern_idx.front(),hist_pattern_i,matrix_histogram_index, matrix_index, module_i,
T_ModuleHelper::nMasks(), n_mask_defects, det_ele->center());
n_defects_total+=module_defects.size();
m_modulesWithoutDefectParameters += module_without_matching_pattern;
if (!m_outputFile.value().empty()) {
if (hasExtensions(m_outputFile.value(),".json")) {
toJson(*defects, m_outputFile.value());
else if (hasExtensions(m_outputFile.value(),".root")) {
toRoot(*defects, m_outputFile.value());
ATH_CHECK( defectsOut.record (std::move(defects)) );
if (msgLvl(MSG::INFO)) {
printSummaryOfDefectGeneration(T_ModuleHelper::nMasks(), n_error, n_defects_total,counts);
return StatusCode::SUCCESS;