#ifndef MUONINFERENCE_BUCKETGRAPHUTILS_H
#define MUONINFERENCE_BUCKETGRAPHUTILS_H

#include <unordered_set>
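// Utilities for building graph inputs from space-point buckets: per-bucket node
// features, a proximity-based edge list, and a packed edge-index buffer.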
return b.coveredMax() - b.coveredMin();
std::vector<NodeAux>& nodes,
std::vector<float>& featuresLeaves,
std::vector<int64_t>& spInBucket)
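// Reset the output containers and reserve space up front: one node per bucket
// and six float features per node.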
featuresLeaves.clear();
nodes.reserve(buckets.size());
featuresLeaves.reserve(6u * buckets.size());
spInBucket.reserve(buckets.size());
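// Represent the bucket by the midpoint of its covered interval, transformed into
// the global frame via the sector's local-to-global transform.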
if (bucket->msSector()) {
    const double midY = 0.5 * (bucket->coveredMin() + bucket->coveredMax());
    const Amg::Vector3D glob = bucket->msSector()->localToGlobalTrans(gctx) * (midY * Amg::Vector3D::UnitY());
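// Count how many distinct sector layers contribute space points to this bucket.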
std::unordered_set<unsigned int> laySet;
laySet.reserve(bucket->size());
for (const auto& spPtr : *bucket) {
    laySet.insert(layerSorter.sectorLayerNum(*spPtr));
}
n.layers = static_cast<int>(laySet.size());
n.nSp = static_cast<int>(bucket->size());
n.sector = bucket->msSector()->sector();
n.chamber = Acts::toUnderlying(bucket->msSector()->chamberIndex());
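// Flatten the node features in a fixed order: x, y, z, layers, nSp, bucketSize.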
featuresLeaves.push_back(static_cast<float>(n.x));
featuresLeaves.push_back(static_cast<float>(n.y));
featuresLeaves.push_back(static_cast<float>(n.z));
featuresLeaves.push_back(static_cast<float>(n.layers));
featuresLeaves.push_back(static_cast<float>(n.nSp));
featuresLeaves.push_back(static_cast<float>(n.bucketSize));
spInBucket.emplace_back(static_cast<int64_t>(n.nSp));
std::vector<int64_t>& srcEdges,
std::vector<int64_t>& dstEdges)
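// Collect the indices of nodes that are usable for edge building; only these
// enter the pairwise loop below.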
std::vector<size_t> validIdx;
validIdx.reserve(nodes.size());
for (size_t i = 0; i < nodes.size(); ++i) {
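// With fewer than two usable nodes there is no pair to connect: fall back to a
// single self-loop on node 0 so the edge lists are never empty.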
if (validIdx.size() < 2) {
    if (!nodes.empty()) {
        srcEdges.push_back(0);
        dstEdges.push_back(0);
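// Test every unordered pair of usable nodes exactly once (b > a).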
for (size_t a = 0; a < validIdx.size(); ++a) {
    const size_t i = validIdx[a];
    const auto& ni = nodes[i];
    for (size_t b = a + 1; b < validIdx.size(); ++b) {
        const size_t j = validIdx[b];
        const auto& nj = nodes[j];
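        // Spatial separation and sector/chamber index differences between the two nodes.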
        const double dx = static_cast<double>(ni.x) - static_cast<double>(nj.x);
        const double dy = static_cast<double>(ni.y) - static_cast<double>(nj.y);
        const double dz = static_cast<double>(ni.z) - static_cast<double>(nj.z);
        const double distXY = Acts::fastHypot(dx, dy);
        const int secDiffLin = std::abs(ni.sector - nj.sector) % static_cast<int>(secMax);
        const int d_sec = std::min(secDiffLin, static_cast<int>(secMax) - secDiffLin);
        const int d_ch = std::abs(ni.chamber - nj.chamber);
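        // Accept the pair only if the nodes are close in the transverse plane and in z,
        // and sit in neighbouring sectors and chambers (the sector difference is folded
        // so that the wrap-around at secMax is handled).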
        (d_sec <= maxSectorDelta) &&
        (distXY < maxDistXY) &&
        (std::abs(dz) < maxAbsDz) &&
        (d_ch <= maxChamberDelta);
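        // Store each accepted edge in both directions so the graph is effectively undirected.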
        srcEdges.push_back(static_cast<int64_t>(i));
        dstEdges.push_back(static_cast<int64_t>(j));
        srcEdges.push_back(static_cast<int64_t>(j));
        dstEdges.push_back(static_cast<int64_t>(i));
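// Safety net: if no pair survived the cuts, keep at least one self-loop so the
// edge lists are not empty.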
if (srcEdges.empty() && !nodes.empty()) {
    srcEdges.push_back(0);
    dstEdges.push_back(0);
const std::vector<int64_t>& dstEdges,
std::vector<int64_t>& edgeIndexPacked)
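// Pack the edge index as a flattened 2 x E buffer: all source indices first,
// followed by all destination indices.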
const size_t E = srcEdges.size();
edgeIndexPacked = srcEdges;
edgeIndexPacked.insert(edgeIndexPacked.end(), dstEdges.begin(), dstEdges.end());