5 #ifndef MUONINFERENCE_BUCKETGRAPHUTILS_H
6 #define MUONINFERENCE_BUCKETGRAPHUTILS_H
12 #include <unordered_set>
// NOTE(review): fragment — the enclosing function's signature lies above this
// chunk (original line ~30-32), so name and parameter types are not visible.
// Visible logic: return the width of the bucket's covered interval,
// coveredMax() - coveredMin(). Presumably non-negative if the bucket API
// guarantees coveredMax() >= coveredMin() — confirm against the Bucket class.
33 return b.coveredMax() -
b.coveredMin();
// NOTE(review): fragment of a node-featurization routine — the function name,
// its leading parameters (at least `buckets`, `gctx`, `layerSorter` judging by
// uses below), and several interior lines are on original lines missing from
// this chunk. Only comments added here; code is untouched.
//
// Visible outputs (all out-parameters, appended per bucket):
//   nodes          — one NodeAux per bucket (auxiliary node record)
//   featuresLeaves — flat float feature array, 6 values per node
//   spInBucket     — space-point count per bucket, as int64_t
42 std::vector<NodeAux>& nodes,
43 std::vector<float>& featuresLeaves,
44 std::vector<int64_t>& spInBucket)
// Reset / pre-size outputs. Only featuresLeaves.clear() is visible; clears of
// the other two containers presumably sit on the missing lines 45-47/49.
48 featuresLeaves.clear();
50 nodes.reserve(buckets.
size());
// 6 floats per node (x, y, z, layers, nSp, bucketSize — see pushes below).
51 featuresLeaves.reserve(6
u * buckets.
size());
52 spInBucket.reserve(buckets.
size());
// Per-bucket body (the loop header over `buckets` and the declaration of the
// local NodeAux `n` are on missing lines 53-62).
// Midpoint of the bucket's covered interval along local Y, transformed into
// the global frame via the MS-sector local-to-global transform.
63 const double midY = 0.5 * (bucket->coveredMin() + bucket->coveredMax());
64 const Amg::Vector3D glob = bucket->msSector()->localToGlobalTrans(gctx) * (midY * Amg::Vector3D::UnitY());
// Count distinct sector-layer numbers touched by the bucket's space points;
// reserve(bucket->size()) is the upper bound (every point on its own layer).
69 std::unordered_set<unsigned int> laySet;
70 laySet.reserve(bucket->size());
71 for (
const auto& spPtr : *bucket) {
72 laySet.insert(layerSorter.sectorLayerNum(*spPtr));
// Fill the NodeAux record (n.x/n.y/n.z presumably set from `glob` on the
// missing line 73 — TODO confirm).
74 n.layers =
static_cast<int>(laySet.size());
75 n.nSp =
static_cast<int>(bucket->size());
77 n.sector = bucket->msSector()->sector();
78 n.chamber = Acts::toUnderlying(bucket->msSector()->chamberIndex());
// Append the node's 6-float feature vector, in fixed order:
// x, y, z, layer count, space-point count, bucket size.
82 featuresLeaves.push_back(
static_cast<float>(
n.x));
83 featuresLeaves.push_back(
static_cast<float>(
n.y));
84 featuresLeaves.push_back(
static_cast<float>(
n.z));
85 featuresLeaves.push_back(
static_cast<float>(
n.layers));
86 featuresLeaves.push_back(
static_cast<float>(
n.nSp));
87 featuresLeaves.push_back(
static_cast<float>(
n.bucketSize));
// Record the per-bucket space-point multiplicity alongside the features.
89 spInBucket.emplace_back(
static_cast<int64_t
>(
n.nSp));
// NOTE(review): fragment of the edge-construction routine — the function name,
// leading parameters (at least `nodes`, plus cut thresholds maxSectorDelta,
// maxDistXY, maxAbsDz, maxChamberDelta and `secMax`), the validity-filter loop
// body, and the edge-accept `if` are on original lines missing from this
// chunk. Only comments added; code untouched.
//
// Visible outputs: srcEdges/dstEdges — parallel directed edge endpoint lists.
99 std::vector<int64_t>& srcEdges,
100 std::vector<int64_t>& dstEdges)
// Collect indices of nodes passing some validity criterion (the criterion
// itself is on missing lines 109-111 — TODO confirm what it tests).
106 std::vector<size_t> validIdx;
107 validIdx.reserve(nodes.size());
108 for (
size_t i = 0;
i < nodes.size(); ++
i) {
// Degenerate graph: fewer than 2 valid nodes. Emit a single (0,0) self-loop
// so downstream consumers never see an empty edge list for a non-empty graph.
112 if (validIdx.size() < 2) {
113 if (!nodes.empty()) {
114 srcEdges.push_back(0);
115 dstEdges.push_back(0);
// All unordered pairs (a, b) of valid node indices, b > a.
122 for (
size_t a = 0;
a < validIdx.size(); ++
a) {
123 const size_t i = validIdx[
a];
124 const auto& ni = nodes[
i];
125 for (
size_t b =
a + 1;
b < validIdx.size(); ++
b) {
126 const size_t j = validIdx[
b];
127 const auto& nj = nodes[j];
// Coordinate deltas widened to double before subtracting (node fields are
// presumably float — TODO confirm NodeAux member types).
129 const double dx =
static_cast<double>(ni.x) -
static_cast<double>(nj.x);
130 const double dy =
static_cast<double>(ni.y) -
static_cast<double>(nj.y);
131 const double dz =
static_cast<double>(ni.z) -
static_cast<double>(nj.z);
// Transverse (x-y plane) separation.
133 const double distXY = Acts::fastHypot(
dx,
dy);
// Circular sector distance: difference modulo secMax, then take the shorter
// way around the azimuthal ring (handles sector-number wrap-around).
134 const int secDiffLin = std::abs(ni.sector - nj.sector) %
static_cast<int>(secMax);
135 const int d_sec =
std::min(secDiffLin,
static_cast<int>(secMax) - secDiffLin);
136 const int d_ch = std::abs(ni.chamber - nj.chamber);
// Edge-acceptance condition (the variable it initializes and the `if` that
// consumes it are on missing lines 137-139/144-145): all four cuts must hold.
140 (d_sec <= maxSectorDelta) &&
141 (distXY < maxDistXY) &&
142 (std::abs(dz) < maxAbsDz) &&
143 (d_ch <= maxChamberDelta);
// Accepted pair: store both directions (i->j and j->i) so the graph is
// effectively undirected.
146 srcEdges.push_back(
static_cast<int64_t
>(
i));
147 dstEdges.push_back(
static_cast<int64_t
>(j));
148 srcEdges.push_back(
static_cast<int64_t
>(j));
149 dstEdges.push_back(
static_cast<int64_t
>(
i));
// Fallback after the pair loops: if every pair failed the cuts, emit one
// (0,0) self-loop — same non-empty-edge-list guarantee as the degenerate case.
154 if (srcEdges.empty() && !nodes.empty()) {
155 srcEdges.push_back(0);
156 dstEdges.push_back(0);
// NOTE(review): fragment — the function name and leading parameter
// (presumably `const std::vector<int64_t>& srcEdges`) are above this chunk,
// and the body continues past it. Visible logic: pack the two edge-endpoint
// lists into one flat array laid out as [2 x E] in row-major order — first
// all E source indices, then all E destination indices (COO-style
// edge_index). `E` is not referenced in the visible lines; presumably used
// further down (e.g. for a tensor shape) — TODO confirm.
161 const std::vector<int64_t>& dstEdges,
162 std::vector<int64_t>& edgeIndexPacked)
164 const size_t E = srcEdges.size();
165 edgeIndexPacked = srcEdges;
166 edgeIndexPacked.insert(edgeIndexPacked.end(), dstEdges.begin(), dstEdges.end());