ATLAS Offline Software
TemporaryHelpers.h
//
// Copyright (C) 2002-2025 CERN for the benefit of the ATLAS collaboration
//
// Dear emacs, this is -*- c++ -*-
//

#ifndef CALORECGPU_TEMPORARYHELPERS_H
#define CALORECGPU_TEMPORARYHELPERS_H

//Two-level concatenation, so that macro arguments are expanded before pasting.
#define CALORECGPU_TEMP_CONCAT_HELPER_INNER(A, ...) A ## __VA_ARGS__
#define CALORECGPU_TEMP_CONCAT_HELPER(A, B) CALORECGPU_TEMP_CONCAT_HELPER_INNER(A, B)

//We can define a type that already contains all of the necessary temporary arrays
//(of appropriate size and type, of course, and with the same names as the accessor functions),
//or we can simply reuse the individual arrays inside the moments as scratch storage.
//The former is more efficient whenever practical,
//but obviously not an option once the moments themselves have been calculated,
//since the temporaries would overwrite them.
//We may have better future solutions for this...

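//As an illustrative sketch (not part of this header), a client choosing
//the dedicated-struct strategy might provide something like the following,
//where ClusterTempArr and its members are hypothetical names:
//
//  struct ClusterTempArr
//  {
//    float scratch_energy[NMaxClusters];
//    int   scratch_tags[NMaxClusters];
//  };
//
//  #define CALORECGPU_TEMP_STRUCT_TO_USE ClusterTempArr
//  #include "TemporaryHelpers.h"
//
//The implied requirement is that the struct fit within (and be suitably
//aligned for) the storage of the `moments` member it is laundered from.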
#ifdef CALORECGPU_TEMP_STRUCT_TO_USE

#include <new>
//for std::launder
#include <utility>
//for std::forward, used by CALORECGPU_TEMPWRAPPER

namespace CaloRecGPU
{
  template <class T, class PtrLike>
  __host__ __device__ T * get_pointer_to_temp_struct(PtrLike && p)
  {
    return std::launder(reinterpret_cast<T *>(&(p->moments)));
  }
}

//NVCC/EDG diagnostic 177 ("declared but never referenced") is suppressed
//since not every generated accessor need be used;
//the trailing struct declaration forces a semicolon at the macro call site.
#define CALORECGPU_TEMPARR_BASE_1D(TEMPNAME, TYPE) _Pragma("nv_diag_suppress 177") \
  template <class PtrLike> __host__ __device__ const TYPE * CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr) (const PtrLike & arr, const unsigned int idx) \
  { \
    return CaloRecGPU::get_pointer_to_temp_struct<const CALORECGPU_TEMP_STRUCT_TO_USE>(arr)->TEMPNAME + idx; \
  } \
  template <class PtrLike> __host__ __device__ TYPE * CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr) (PtrLike & arr, const unsigned int idx) \
  { \
    return CaloRecGPU::get_pointer_to_temp_struct<CALORECGPU_TEMP_STRUCT_TO_USE>(arr)->TEMPNAME + idx; \
  } \
  template <class PtrLike> __host__ __device__ const TYPE & TEMPNAME (const PtrLike & arr, const unsigned int idx) \
  { \
    return *CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr)(arr, idx); \
  } \
  template <class PtrLike> __host__ __device__ TYPE & TEMPNAME (PtrLike & arr, const unsigned int idx) \
  { \
    return *CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr)(arr, idx); \
  } _Pragma("nv_diag_default 177") struct to_end_with_semicolon

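//For example, given the hypothetical ClusterTempArr sketched above,
//CALORECGPU_TEMPARR_BASE_1D(scratch_energy, float); would generate
//scratch_energy_ptr(arr, i) (a float * into the struct member) and
//scratch_energy(arr, i) (the corresponding float &),
//each in const and non-const flavours.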
#define CALORECGPU_TEMPARR_BASE_2D(TEMPNAME, TYPE) _Pragma("nv_diag_suppress 177") \
  template <class PtrLike> __host__ __device__ const TYPE * CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr) (const PtrLike & arr, const unsigned int jdx, const unsigned int idx) \
  { \
    return &(CaloRecGPU::get_pointer_to_temp_struct<const CALORECGPU_TEMP_STRUCT_TO_USE>(arr)->TEMPNAME[jdx][idx]); \
  } \
  template <class PtrLike> __host__ __device__ TYPE * CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr) (PtrLike & arr, const unsigned int jdx, const unsigned int idx) \
  { \
    return &(CaloRecGPU::get_pointer_to_temp_struct<CALORECGPU_TEMP_STRUCT_TO_USE>(arr)->TEMPNAME[jdx][idx]); \
  } \
  template <class PtrLike> __host__ __device__ const TYPE & TEMPNAME (const PtrLike & arr, const unsigned int jdx, const unsigned int idx) \
  { \
    return *CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr)(arr, jdx, idx); \
  } \
  template <class PtrLike> __host__ __device__ TYPE & TEMPNAME (PtrLike & arr, const unsigned int jdx, const unsigned int idx) \
  { \
    return *CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr)(arr, jdx, idx); \
  } _Pragma("nv_diag_default 177") struct to_end_with_semicolon

//In this branch the BASEVAR arguments are unused:
//the dedicated struct is expected to already provide
//appropriately sized and typed members.
#define CALORECGPU_TEMPARR_1(TEMPNAME, BASEVAR, TYPE) CALORECGPU_TEMPARR_BASE_1D(TEMPNAME, TYPE)

#define CALORECGPU_TEMPARR_2(TEMPNAME, BASEVAR1, BASEVAR2, TYPE) CALORECGPU_TEMPARR_BASE_1D(TEMPNAME, TYPE)

#define CALORECGPU_TEMP2DARR_1(TEMPNAME, BASEVAR, TYPE) CALORECGPU_TEMPARR_BASE_2D(TEMPNAME, TYPE)

#define CALORECGPU_TEMP2DARR_2(TEMPNAME, BASEVAR1, BASEVAR2, TYPE) CALORECGPU_TEMPARR_BASE_2D(TEMPNAME, TYPE)

#define CALORECGPU_TEMPBIGARR_1(TEMPNAME, BASEVAR, TYPE) CALORECGPU_TEMPARR_BASE_1D(TEMPNAME, TYPE)

#define CALORECGPU_TEMPBIGARR_2(TEMPNAME, BASEVAR1, BASEVAR2, TYPE) CALORECGPU_TEMPARR_BASE_1D(TEMPNAME, TYPE)

#define CALORECGPU_TEMPBIGARR_3(TEMPNAME, BASEVAR1, BASEVAR2, BASEVAR3, TYPE) CALORECGPU_TEMPARR_BASE_1D(TEMPNAME, TYPE)

#define CALORECGPU_TEMPCELLARR_1(TEMPNAME, BASEVAR1, BASEVAR2, BASEVAR3, TYPE) CALORECGPU_TEMPARR_BASE_1D(TEMPNAME, TYPE)

#define CALORECGPU_TEMPCELLARR_2(TEMPNAME, BASEVAR1, BASEVAR2, BASEVAR3, BASEVAR4, BASEVAR5, BASEVAR6, TYPE) CALORECGPU_TEMPARR_BASE_1D(TEMPNAME, TYPE)

#define CALORECGPU_TEMPVAR(TEMPNAME, BASEVAR, INDEX, TYPE) _Pragma("nv_diag_suppress 177") \
  template <class PtrLike> __host__ __device__ const TYPE * CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr) (const PtrLike & arr) \
  { \
    return &CaloRecGPU::get_pointer_to_temp_struct<const CALORECGPU_TEMP_STRUCT_TO_USE>(arr)->TEMPNAME; \
  } \
  template <class PtrLike> __host__ __device__ TYPE * CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr) (PtrLike & arr) \
  { \
    return &CaloRecGPU::get_pointer_to_temp_struct<CALORECGPU_TEMP_STRUCT_TO_USE>(arr)->TEMPNAME; \
  } \
  template <class PtrLike> __host__ __device__ const TYPE & TEMPNAME (const PtrLike & arr) \
  { \
    return *CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr)(arr); \
  } \
  template <class PtrLike> __host__ __device__ TYPE & TEMPNAME (PtrLike & arr) \
  { \
    return *CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr)(arr); \
  } _Pragma("nv_diag_default 177") struct to_end_with_semicolon

#define CALORECGPU_TEMPWRAPPER(TEMPNAME, WRAPPED) _Pragma("nv_diag_suppress 177") \
  template <class PtrLike, class ... Args> __host__ __device__ decltype(auto) TEMPNAME (PtrLike && p, Args && ... args) \
  { \
    return std::forward<PtrLike>(p)-> WRAPPED (std::forward<Args>(args)...); \
  } _Pragma("nv_diag_default 177") struct to_end_with_semicolon

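//For example (with a hypothetical member function name),
//CALORECGPU_TEMPWRAPPER(cluster_size, get_cluster_size); would generate
//cluster_size(p, args...), perfectly forwarding to p->get_cluster_size(args...).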

#else


#include <cassert>
#include <cstddef>
//for offsetof, used in the macros below
#include <new>
#include <utility>
#include <type_traits>

namespace CaloRecGPU
{
  //Assumptions: each individual pointer-like object
  //refers to a contiguous memory region
  //whose first address is obtainable by &p[0].
  template <class T, unsigned int ... us, class ... PtrLikes>
  __host__ __device__ T * get_laundered_pointer(unsigned int idx, PtrLikes && ... p);

  inline constexpr unsigned int get_extra_alignment(const unsigned int base_align, const unsigned int required)
  {
    const unsigned int delta = base_align % required;

    return required * (delta != 0) - delta;
  }

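  //For instance, get_extra_alignment(4, 8) == 4: starting 4 bytes past
  //an 8-byte boundary, 4 bytes of padding reach the next aligned address,
  //while get_extra_alignment(8, 8) == 0 and get_extra_alignment(0, 8) == 0.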
  template <class T>
  constexpr bool __host__ __device__ check_sufficient_size(unsigned int offset, unsigned int index)
  {
    return (NMaxClusters >= get_extra_alignment(offset, alignof(T)) + index * sizeof(T));
  }

  template <class T, unsigned int offset, class PtrLike>
  __host__ __device__ T * get_laundered_pointer(unsigned int idx, PtrLike && ptr)
  {
    //remove_reference_t (rather than decay_t) so that the const-ness
    //of the elements carries over into BasePtrType.
    using PtrType = std::remove_reference_t<decltype(ptr[0])>;
    using BasePtrType = std::conditional_t<std::is_const_v<PtrType>, const char *, char *>;

    constexpr unsigned int base_offset = offset % alignof(T);

    constexpr unsigned int extra_alignment = get_extra_alignment(base_offset, alignof(T));

    BasePtrType base_ptr = reinterpret_cast<BasePtrType>(&ptr[0]);

    return std::launder(reinterpret_cast<T *>(base_ptr + extra_alignment + idx * sizeof(T)));
  }

  template <class T, unsigned int offset, unsigned int ... us, class PtrLike, class ... PtrLikes>
  __host__ __device__ T * get_laundered_pointer(unsigned int idx, PtrLike && ptr, PtrLikes && ... ps)
  {
    using PtrType = std::decay_t<decltype(ptr[0])>;

    constexpr unsigned int max_size = NMaxClusters * sizeof(PtrType);

    constexpr unsigned int base_offset = offset % alignof(T);
    constexpr unsigned int extra_alignment = get_extra_alignment(base_offset, alignof(T));

    constexpr unsigned int real_size = (max_size - extra_alignment) / sizeof(T);

    return (real_size > idx ? get_laundered_pointer<T, offset>(idx, std::forward<PtrLike>(ptr)) : get_laundered_pointer<T, us...>(idx - real_size, std::forward<PtrLikes>(ps)...));
  }

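  //For instance, spanning a temporary array of doubles over two float
  //arrays of NMaxClusters entries each (and assuming no extra alignment
  //is needed), real_size == NMaxClusters / 2 per array, so indices
  //[0, NMaxClusters / 2) land in the first array and the recursion
  //resolves [NMaxClusters / 2, NMaxClusters) into the second.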
  //In this case, the pointer-likes are to 2D arrays
  //and we stack them so that ptr[j][N] and ptr[j][N+1]
  //may be across different arrays.
  //For ease of implementation, we only support suitably aligned
  //types (up to the alignment of double).
  template <class T, class ... PtrLikes>
  __host__ __device__ T * get_laundered_pointer_striped(unsigned int jdx, unsigned int idx, PtrLikes && ... ps)
  {
    static_assert(alignof(T) <= alignof(double), "We don't support aligning in this case...");

    return get_laundered_pointer<T, (alignof(decltype(ps[0][0])) * 0)...>(idx, ps[jdx]...);
    //We pass a param pack of 0s (with the right size)
    //as we know we won't need alignment.
  }

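  //For instance, striping a per-sampling temporary array of doubles over
  //two [NumSamplings][NMaxClusters] float arrays a and b means element
  //(j, i) lives in a[j] for i < NMaxClusters / 2 and in b[j] afterwards:
  //consecutive entries of one logical row may thus sit in different
  //physical arrays, but always within the same sampling's rows.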
  //In this case, we use the 2D arrays
  //to store a single, contiguous array
  //such that ptr[j][N] and ptr[j + 1][0]
  //may hold contiguous objects.
  template <class T, class ... PtrLikes>
  __host__ __device__ T * get_laundered_pointer_stacked(unsigned int idx, PtrLikes && ... ps);

  template <class T, class PtrLike>
  __host__ __device__ T * get_laundered_pointer_stacked(unsigned int idx, PtrLike && ptr)
  {
    static_assert(alignof(T) <= alignof(double), "We don't support aligning in this case...");

    using PtrType = std::decay_t<decltype(ptr[0][0])>;
    using BasePtrType = std::conditional_t<std::is_const_v<PtrType>, const char *, char *>;

    constexpr unsigned int num_per_array = (NMaxClusters * sizeof(PtrType)) / sizeof(T);

    const unsigned int first_idx = idx / num_per_array;

    const unsigned int second_idx = idx % num_per_array;

    return get_laundered_pointer<T, 0>(second_idx, ptr[first_idx]);
  }

  template <class T, class PtrLike, class ... PtrLikes>
  __host__ __device__ T * get_laundered_pointer_stacked(unsigned int idx, PtrLike && ptr, PtrLikes && ... ps)
  {
    static_assert(alignof(T) <= alignof(double));

    using PtrType = std::decay_t<decltype(ptr[0][0])>;

    constexpr unsigned int num_per_array = (NMaxClusters * sizeof(PtrType)) / sizeof(T);
    constexpr unsigned int total_num = num_per_array * NumSamplings;

    return (total_num > idx ? get_laundered_pointer_stacked<T>(idx, std::forward<PtrLike>(ptr)) : get_laundered_pointer_stacked<T>(idx - total_num, std::forward<PtrLikes>(ps)...));
  }
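  //For instance, stacking one temporary float array over two
  //[NumSamplings][NMaxClusters] float arrays a and b gives
  //NumSamplings * NMaxClusters entries inside a (index i mapping to
  //a[i / NMaxClusters][i % NMaxClusters]) before the recursion
  //continues seamlessly into b for the remaining indices.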
}

#define CALORECGPU_TEMPARR_1(TEMPNAME, BASEVAR, TYPE) _Pragma("nv_diag_suppress 177") \
  template <class PtrLike> __host__ __device__ const TYPE * CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr) (const PtrLike & arr, const unsigned int idx) \
  { \
    return CaloRecGPU::get_laundered_pointer<const TYPE, offsetof(CaloRecGPU::ClusterInfoArr::ClusterMomentsArr, BASEVAR)>(idx, arr->moments. BASEVAR); \
  } \
  template <class PtrLike> __host__ __device__ TYPE * CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr) (PtrLike & arr, const unsigned int idx) \
  { \
    return CaloRecGPU::get_laundered_pointer<TYPE, offsetof(CaloRecGPU::ClusterInfoArr::ClusterMomentsArr, BASEVAR)>(idx, arr->moments. BASEVAR); \
  } \
  template <class PtrLike> __host__ __device__ const TYPE & TEMPNAME (const PtrLike & arr, const unsigned int idx) \
  { \
    return *CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr)(arr, idx); \
  } \
  template <class PtrLike> __host__ __device__ TYPE & TEMPNAME (PtrLike & arr, const unsigned int idx) \
  { \
    return *CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr)(arr, idx); \
  } _Pragma("nv_diag_default 177") struct to_end_with_semicolon

#define CALORECGPU_TEMPARR_2(TEMPNAME, BASEVAR1, BASEVAR2, TYPE) _Pragma("nv_diag_suppress 177") \
  template <class PtrLike> __host__ __device__ const TYPE * CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr) (const PtrLike & arr, const unsigned int idx) \
  { \
    return CaloRecGPU::get_laundered_pointer<const TYPE, \
           offsetof(CaloRecGPU::ClusterInfoArr::ClusterMomentsArr, BASEVAR1), \
           offsetof(CaloRecGPU::ClusterInfoArr::ClusterMomentsArr, BASEVAR2)> \
           (idx, arr->moments. BASEVAR1, arr->moments. BASEVAR2); \
  } \
  template <class PtrLike> __host__ __device__ TYPE * CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr) (PtrLike & arr, const unsigned int idx) \
  { \
    return CaloRecGPU::get_laundered_pointer<TYPE, \
           offsetof(CaloRecGPU::ClusterInfoArr::ClusterMomentsArr, BASEVAR1), \
           offsetof(CaloRecGPU::ClusterInfoArr::ClusterMomentsArr, BASEVAR2)> \
           (idx, arr->moments. BASEVAR1, arr->moments. BASEVAR2); \
  } \
  template <class PtrLike> __host__ __device__ const TYPE & TEMPNAME (const PtrLike & arr, const unsigned int idx) \
  { \
    return *CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr)(arr, idx); \
  } \
  template <class PtrLike> __host__ __device__ TYPE & TEMPNAME (PtrLike & arr, const unsigned int idx) \
  { \
    return *CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr)(arr, idx); \
  } _Pragma("nv_diag_default 177") struct to_end_with_semicolon

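//For example (illustrative only: temp_scores is a name invented here, and
//momentA / momentB stand in for actual float members of ClusterMomentsArr
//with NMaxClusters entries each):
//
//  CALORECGPU_TEMPARR_2(temp_scores, momentA, momentB, double);
//
//would generate temp_scores(arr, i), a double & into storage spanning
//arr->moments.momentA and then arr->moments.momentB, with the
//offsetof-based template arguments letting get_laundered_pointer
//fix up the alignment of each underlying region.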
#define CALORECGPU_TEMP2DARR_1(TEMPNAME, BASEVAR, TYPE) _Pragma("nv_diag_suppress 177") \
  template <class PtrLike> __host__ __device__ const TYPE * CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr) (const PtrLike & arr, const unsigned int jdx, const unsigned int idx) \
  { \
    return CaloRecGPU::get_laundered_pointer_striped<const TYPE>(jdx, idx, arr->moments. BASEVAR); \
  } \
  template <class PtrLike> __host__ __device__ TYPE * CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr) (PtrLike & arr, const unsigned int jdx, const unsigned int idx) \
  { \
    return CaloRecGPU::get_laundered_pointer_striped<TYPE>(jdx, idx, arr->moments. BASEVAR); \
  } \
  template <class PtrLike> __host__ __device__ const TYPE & TEMPNAME (const PtrLike & arr, const unsigned int jdx, const unsigned int idx) \
  { \
    return *CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr)(arr, jdx, idx); \
  } \
  template <class PtrLike> __host__ __device__ TYPE & TEMPNAME (PtrLike & arr, const unsigned int jdx, const unsigned int idx) \
  { \
    return *CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr)(arr, jdx, idx); \
  } _Pragma("nv_diag_default 177") struct to_end_with_semicolon

#define CALORECGPU_TEMP2DARR_2(TEMPNAME, BASEVAR1, BASEVAR2, TYPE) _Pragma("nv_diag_suppress 177") \
  template <class PtrLike> __host__ __device__ const TYPE * CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr) (const PtrLike & arr, const unsigned int jdx, const unsigned int idx) \
  { \
    return CaloRecGPU::get_laundered_pointer_striped<const TYPE>(jdx, idx, arr->moments. BASEVAR1, arr->moments. BASEVAR2); \
  } \
  template <class PtrLike> __host__ __device__ TYPE * CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr) (PtrLike & arr, const unsigned int jdx, const unsigned int idx) \
  { \
    return CaloRecGPU::get_laundered_pointer_striped<TYPE>(jdx, idx, arr->moments. BASEVAR1, arr->moments. BASEVAR2); \
  } \
  template <class PtrLike> __host__ __device__ const TYPE & TEMPNAME (const PtrLike & arr, const unsigned int jdx, const unsigned int idx) \
  { \
    return *CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr)(arr, jdx, idx); \
  } \
  template <class PtrLike> __host__ __device__ TYPE & TEMPNAME (PtrLike & arr, const unsigned int jdx, const unsigned int idx) \
  { \
    return *CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr)(arr, jdx, idx); \
  } _Pragma("nv_diag_default 177") struct to_end_with_semicolon

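//As a sketch (moment2D being a hypothetical [NumSamplings][NMaxClusters]
//member), CALORECGPU_TEMP2DARR_1(temp_per_sample, moment2D, float);
//would generate temp_per_sample(arr, j, i): a striped view whose row j
//is laid over arr->moments.moment2D[j], preserving the
//[sampling][cluster] indexing of the underlying moment;
//the _2 variant stripes each row across two such members.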
#define CALORECGPU_TEMPBIGARR_1(TEMPNAME, BASEVAR, TYPE) _Pragma("nv_diag_suppress 177") \
  template <class PtrLike> __host__ __device__ const TYPE * CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr) (const PtrLike & arr, const unsigned int idx) \
  { \
    return CaloRecGPU::get_laundered_pointer_stacked<const TYPE>(idx, arr->moments. BASEVAR); \
  } \
  template <class PtrLike> __host__ __device__ TYPE * CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr) (PtrLike & arr, const unsigned int idx) \
  { \
    return CaloRecGPU::get_laundered_pointer_stacked<TYPE>(idx, arr->moments. BASEVAR); \
  } \
  template <class PtrLike> __host__ __device__ const TYPE & TEMPNAME (const PtrLike & arr, const unsigned int idx) \
  { \
    return *CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr)(arr, idx); \
  } \
  template <class PtrLike> __host__ __device__ TYPE & TEMPNAME (PtrLike & arr, const unsigned int idx) \
  { \
    return *CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr)(arr, idx); \
  } _Pragma("nv_diag_default 177") struct to_end_with_semicolon

#define CALORECGPU_TEMPBIGARR_2(TEMPNAME, BASEVAR1, BASEVAR2, TYPE) _Pragma("nv_diag_suppress 177") \
  template <class PtrLike> __host__ __device__ const TYPE * CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr) (const PtrLike & arr, const unsigned int idx) \
  { \
    return CaloRecGPU::get_laundered_pointer_stacked<const TYPE>(idx, arr->moments. BASEVAR1, arr->moments. BASEVAR2); \
  } \
  template <class PtrLike> __host__ __device__ TYPE * CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr) (PtrLike & arr, const unsigned int idx) \
  { \
    return CaloRecGPU::get_laundered_pointer_stacked<TYPE>(idx, arr->moments. BASEVAR1, arr->moments. BASEVAR2); \
  } \
  template <class PtrLike> __host__ __device__ const TYPE & TEMPNAME (const PtrLike & arr, const unsigned int idx) \
  { \
    return *CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr)(arr, idx); \
  } \
  template <class PtrLike> __host__ __device__ TYPE & TEMPNAME (PtrLike & arr, const unsigned int idx) \
  { \
    return *CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr)(arr, idx); \
  } _Pragma("nv_diag_default 177") struct to_end_with_semicolon

#define CALORECGPU_TEMPBIGARR_3(TEMPNAME, BASEVAR1, BASEVAR2, BASEVAR3, TYPE) _Pragma("nv_diag_suppress 177") \
  template <class PtrLike> __host__ __device__ const TYPE * CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr) (const PtrLike & arr, const unsigned int idx) \
  { \
    return CaloRecGPU::get_laundered_pointer_stacked<const TYPE>(idx, arr->moments. BASEVAR1, arr->moments. BASEVAR2, arr->moments. BASEVAR3); \
  } \
  template <class PtrLike> __host__ __device__ TYPE * CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr) (PtrLike & arr, const unsigned int idx) \
  { \
    return CaloRecGPU::get_laundered_pointer_stacked<TYPE>(idx, arr->moments. BASEVAR1, arr->moments. BASEVAR2, arr->moments. BASEVAR3); \
  } \
  template <class PtrLike> __host__ __device__ const TYPE & TEMPNAME (const PtrLike & arr, const unsigned int idx) \
  { \
    return *CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr)(arr, idx); \
  } \
  template <class PtrLike> __host__ __device__ TYPE & TEMPNAME (PtrLike & arr, const unsigned int idx) \
  { \
    return *CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr)(arr, idx); \
  } _Pragma("nv_diag_default 177") struct to_end_with_semicolon

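//As a sketch (moment2D_A and moment2D_B being hypothetical
//[NumSamplings][NMaxClusters] float members),
//CALORECGPU_TEMPBIGARR_2(temp_big, moment2D_A, moment2D_B, int);
//would generate temp_big(arr, i): one flat, contiguously indexed
//temporary stacked over all of arr->moments.moment2D_A and then
//all of arr->moments.moment2D_B.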
#define CALORECGPU_TEMPCELLARR_1(TEMPNAME, BASEVAR1, BASEVAR2, BASEVAR3, TYPE) _Pragma("nv_diag_suppress 177") \
  template <class PtrLike> __host__ __device__ const TYPE * CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr) (const PtrLike & arr, const unsigned int idx) \
  { \
    return CaloRecGPU::get_laundered_pointer<const TYPE, \
           offsetof(CaloRecGPU::ClusterInfoArr::ClusterMomentsArr, BASEVAR1), \
           offsetof(CaloRecGPU::ClusterInfoArr::ClusterMomentsArr, BASEVAR2), \
           offsetof(CaloRecGPU::ClusterInfoArr::ClusterMomentsArr, BASEVAR3)> \
           (idx, arr->moments. BASEVAR1, arr->moments. BASEVAR2, arr->moments. BASEVAR3); \
  } \
  template <class PtrLike> __host__ __device__ TYPE * CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr) (PtrLike & arr, const unsigned int idx) \
  { \
    return CaloRecGPU::get_laundered_pointer<TYPE, \
           offsetof(CaloRecGPU::ClusterInfoArr::ClusterMomentsArr, BASEVAR1), \
           offsetof(CaloRecGPU::ClusterInfoArr::ClusterMomentsArr, BASEVAR2), \
           offsetof(CaloRecGPU::ClusterInfoArr::ClusterMomentsArr, BASEVAR3)> \
           (idx, arr->moments. BASEVAR1, arr->moments. BASEVAR2, arr->moments. BASEVAR3); \
  } \
  template <class PtrLike> __host__ __device__ const TYPE & TEMPNAME (const PtrLike & arr, const unsigned int idx) \
  { \
    return *CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr)(arr, idx); \
  } \
  template <class PtrLike> __host__ __device__ TYPE & TEMPNAME (PtrLike & arr, const unsigned int idx) \
  { \
    return *CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr)(arr, idx); \
  } _Pragma("nv_diag_default 177") struct to_end_with_semicolon

#define CALORECGPU_TEMPCELLARR_2(TEMPNAME, BASEVAR1, BASEVAR2, BASEVAR3, BASEVAR4, BASEVAR5, BASEVAR6, TYPE) _Pragma("nv_diag_suppress 177") \
  template <class PtrLike> __host__ __device__ const TYPE * CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr) (const PtrLike & arr, const unsigned int idx) \
  { \
    return CaloRecGPU::get_laundered_pointer<const TYPE, \
           offsetof(CaloRecGPU::ClusterInfoArr::ClusterMomentsArr, BASEVAR1), \
           offsetof(CaloRecGPU::ClusterInfoArr::ClusterMomentsArr, BASEVAR2), \
           offsetof(CaloRecGPU::ClusterInfoArr::ClusterMomentsArr, BASEVAR3), \
           offsetof(CaloRecGPU::ClusterInfoArr::ClusterMomentsArr, BASEVAR4), \
           offsetof(CaloRecGPU::ClusterInfoArr::ClusterMomentsArr, BASEVAR5), \
           offsetof(CaloRecGPU::ClusterInfoArr::ClusterMomentsArr, BASEVAR6)> \
           (idx, arr->moments. BASEVAR1, arr->moments. BASEVAR2, arr->moments. BASEVAR3, \
            arr->moments. BASEVAR4, arr->moments. BASEVAR5, arr->moments. BASEVAR6); \
  } \
  template <class PtrLike> __host__ __device__ TYPE * CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr) (PtrLike & arr, const unsigned int idx) \
  { \
    return CaloRecGPU::get_laundered_pointer<TYPE, \
           offsetof(CaloRecGPU::ClusterInfoArr::ClusterMomentsArr, BASEVAR1), \
           offsetof(CaloRecGPU::ClusterInfoArr::ClusterMomentsArr, BASEVAR2), \
           offsetof(CaloRecGPU::ClusterInfoArr::ClusterMomentsArr, BASEVAR3), \
           offsetof(CaloRecGPU::ClusterInfoArr::ClusterMomentsArr, BASEVAR4), \
           offsetof(CaloRecGPU::ClusterInfoArr::ClusterMomentsArr, BASEVAR5), \
           offsetof(CaloRecGPU::ClusterInfoArr::ClusterMomentsArr, BASEVAR6)> \
           (idx, arr->moments. BASEVAR1, arr->moments. BASEVAR2, arr->moments. BASEVAR3, \
            arr->moments. BASEVAR4, arr->moments. BASEVAR5, arr->moments. BASEVAR6); \
  } \
  template <class PtrLike> __host__ __device__ const TYPE & TEMPNAME (const PtrLike & arr, const unsigned int idx) \
  { \
    return *CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr)(arr, idx); \
  } \
  template <class PtrLike> __host__ __device__ TYPE & TEMPNAME (PtrLike & arr, const unsigned int idx) \
  { \
    return *CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr)(arr, idx); \
  } _Pragma("nv_diag_default 177") struct to_end_with_semicolon

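//The TEMPCELLARR variants work like TEMPARR but chain three or six
//cluster-sized moment members, presumably so that a temporary with one
//entry per calorimeter cell (rather than per cluster) can still be
//carved out of cluster-sized storage.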
#define CALORECGPU_TEMPVAR(TEMPNAME, BASEVAR, INDEX, TYPE) _Pragma("nv_diag_suppress 177") \
  template <class PtrLike> __host__ __device__ const TYPE * CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr) (const PtrLike & arr) \
  { \
    static_assert(CaloRecGPU::check_sufficient_size<TYPE>(offsetof(CaloRecGPU::ClusterInfoArr::ClusterMomentsArr, BASEVAR), INDEX)); \
    return CaloRecGPU::get_laundered_pointer<const TYPE, offsetof(CaloRecGPU::ClusterInfoArr::ClusterMomentsArr, BASEVAR)>(INDEX, arr->moments. BASEVAR); \
  } \
  template <class PtrLike> __host__ __device__ TYPE * CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr) (PtrLike & arr) \
  { \
    static_assert(CaloRecGPU::check_sufficient_size<TYPE>(offsetof(CaloRecGPU::ClusterInfoArr::ClusterMomentsArr, BASEVAR), INDEX)); \
    return CaloRecGPU::get_laundered_pointer<TYPE, offsetof(CaloRecGPU::ClusterInfoArr::ClusterMomentsArr, BASEVAR)>(INDEX, arr->moments. BASEVAR); \
  } \
  template <class PtrLike> __host__ __device__ const TYPE & TEMPNAME (const PtrLike & arr) \
  { \
    return *CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr)(arr); \
  } \
  template <class PtrLike> __host__ __device__ TYPE & TEMPNAME (PtrLike & arr) \
  { \
    return *CALORECGPU_TEMP_CONCAT_HELPER(TEMPNAME, _ptr)(arr); \
  } _Pragma("nv_diag_default 177") struct to_end_with_semicolon

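//As a sketch (momentA being a hypothetical member),
//CALORECGPU_TEMPVAR(temp_counter, momentA, 3, int); would generate
//temp_counter(arr): an int & aliasing (suitably aligned) element 3
//of the storage of arr->moments.momentA, with the static_assert
//rejecting at compile time any access past the member's capacity.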
#define CALORECGPU_TEMPWRAPPER(TEMPNAME, WRAPPED) _Pragma("nv_diag_suppress 177") \
  template <class PtrLike, class ... Args> __host__ __device__ decltype(auto) TEMPNAME (PtrLike && p, Args && ... args) \
  { \
    return std::forward<PtrLike>(p)-> WRAPPED (std::forward<Args>(args)...); \
  } _Pragma("nv_diag_default 177") struct to_end_with_semicolon

#endif

#endif