@@ -283,7 +283,7 @@ struct Slab {
 
 /// A wait-free guard around a pointer resource to be created dynamically if
 /// space is available and freed once there are no more users.
-template <typename T> struct GuardPtr {
+struct GuardPtr {
 private:
   struct RefCounter {
     // Indicates that the object is in its deallocation phase and thus invalid.
@@ -339,22 +339,22 @@ template <typename T> struct GuardPtr {
     cpp::Atomic<uint64_t> counter{0};
   };
 
-  cpp::Atomic<T *> ptr{nullptr};
+  cpp::Atomic<Slab *> ptr{nullptr};
   RefCounter ref{};
 
   // Should be called by a single lane for each different pointer.
   template <typename... Args>
-  T *try_lock_impl(uint32_t n, uint64_t &count, Args &&...args) {
-    T *expected = ptr.load(cpp::MemoryOrder::RELAXED);
+  Slab *try_lock_impl(uint32_t n, uint64_t &count, Args &&...args) {
+    Slab *expected = ptr.load(cpp::MemoryOrder::RELAXED);
     if (!expected &&
-        ptr.compare_exchange_strong(expected, reinterpret_cast<T *>(SENTINEL),
-                                    cpp::MemoryOrder::RELAXED,
-                                    cpp::MemoryOrder::RELAXED)) {
+        ptr.compare_exchange_strong(
+            expected, reinterpret_cast<Slab *>(SENTINEL),
+            cpp::MemoryOrder::RELAXED, cpp::MemoryOrder::RELAXED)) {
       count = cpp::numeric_limits<uint64_t>::max();
-      void *raw = impl::rpc_allocate(sizeof(T));
+      void *raw = impl::rpc_allocate(sizeof(Slab));
       if (!raw)
         return nullptr;
-      T *mem = new (raw) T(cpp::forward<Args>(args)...);
+      Slab *mem = new (raw) Slab(cpp::forward<Args>(args)...);
 
       cpp::atomic_thread_fence(cpp::MemoryOrder::RELEASE);
       ptr.store(mem, cpp::MemoryOrder::RELAXED);
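The allocation path above is a claim-then-publish scheme: the first lane to swing the pointer from nullptr to SENTINEL owns construction, every other lane either observes the sentinel (construction in progress) or the published object, and the release fence orders the placement-new before the store that publishes it. A minimal standalone sketch of the same pattern, with std::atomic and malloc standing in for cpp::Atomic and impl::rpc_allocate (Widget, try_claim, and the error handling are illustrative, not part of the allocator):

#include <atomic>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <new>

// Illustrative stand-in for the guarded resource.
struct Widget {
  int value = 42;
};

// Sentinel that marks "construction in progress", distinct from both the
// empty state (nullptr) and any published object.
static constexpr uintptr_t SENTINEL = ~uintptr_t(0);

static std::atomic<Widget *> guarded{nullptr};

// Returns the published Widget, or nullptr if the slot is empty, still being
// constructed by another thread, or if this thread's allocation failed.
Widget *try_claim() {
  Widget *expected = guarded.load(std::memory_order_relaxed);
  if (!expected &&
      guarded.compare_exchange_strong(
          expected, reinterpret_cast<Widget *>(SENTINEL),
          std::memory_order_relaxed, std::memory_order_relaxed)) {
    // This thread won the claim: construct the object, then publish it.
    void *raw = std::malloc(sizeof(Widget));
    if (!raw)
      return nullptr;
    Widget *mem = new (raw) Widget();
    // Order the construction before the store that makes it visible.
    std::atomic_thread_fence(std::memory_order_release);
    guarded.store(mem, std::memory_order_relaxed);
    return mem;
  }
  // Empty, or another thread is still constructing behind the sentinel.
  if (!expected || expected == reinterpret_cast<Widget *>(SENTINEL))
    return nullptr;
  return expected;
}

int main() {
  Widget *w = try_claim();
  std::printf("claimed: %d\n", w ? w->value : -1);
}

Note that a failed allocation leaves the sentinel in place in both the sketch and the hunk above; readers of the returned pointer remain responsible for their own acquire ordering.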
@@ -364,7 +364,7 @@ template <typename T> struct GuardPtr {
       return mem;
     }
 
-    if (!expected || expected == reinterpret_cast<T *>(SENTINEL))
+    if (!expected || expected == reinterpret_cast<Slab *>(SENTINEL))
       return nullptr;
 
     if (!ref.acquire(n, count))
@@ -379,10 +379,10 @@ template <typename T> struct GuardPtr {
   // The uniform mask represents which lanes share the same pointer. For each
   // uniform value we elect a leader to handle it on behalf of the other lanes.
   template <typename... Args>
-  T *try_lock(uint64_t lane_mask, uint64_t uniform, uint64_t &count,
-              Args &&...args) {
+  Slab *try_lock(uint64_t lane_mask, uint64_t uniform, uint64_t &count,
+                 Args &&...args) {
     count = 0;
-    T *result = nullptr;
+    Slab *result = nullptr;
     if (gpu::get_lane_id() == uint32_t(cpp::countr_zero(uniform)))
      result = try_lock_impl(cpp::popcount(uniform), count,
                             cpp::forward<Args>(args)...);
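The two comment lines at the top of this hunk describe the SIMT convention used throughout the allocator: for each distinct uniform value, the lane at the lowest set bit of the mask is elected leader and acts on behalf of popcount(mask) lanes. A host-side illustration of that arithmetic using C++20 <bit> (the device code uses gpu::get_lane_id and the cpp:: wrappers instead; the example mask is made up):

#include <bit>
#include <cstdint>
#include <cstdio>

// Given the mask of lanes that share one uniform value, the elected leader is
// the lowest set lane, and it acts on behalf of popcount(mask) lanes.
constexpr uint32_t leader_lane(uint64_t uniform) {
  return static_cast<uint32_t>(std::countr_zero(uniform));
}

constexpr uint32_t lanes_represented(uint64_t uniform) {
  return static_cast<uint32_t>(std::popcount(uniform));
}

int main() {
  // Example: lanes 3, 5, and 9 requested the same chunk size.
  uint64_t uniform = (1ull << 3) | (1ull << 5) | (1ull << 9);
  std::printf("leader = lane %u, acting for %u lanes\n",
              leader_lane(uniform), lanes_represented(uniform));
  // Prints: leader = lane 3, acting for 3 lanes
}

Electing the lowest set lane is cheap and deterministic, so every lane in the mask agrees on the same leader without any extra communication.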
@@ -403,8 +403,8 @@ template <typename T> struct GuardPtr {
     cpp::atomic_thread_fence(cpp::MemoryOrder::RELEASE);
     if (gpu::get_lane_id() == uint32_t(cpp::countr_zero(mask)) &&
         ref.release(cpp::popcount(mask))) {
-      T *p = ptr.load(cpp::MemoryOrder::RELAXED);
-      p->~T();
+      Slab *p = ptr.load(cpp::MemoryOrder::RELAXED);
+      p->~Slab();
       impl::rpc_free(p);
       cpp::atomic_thread_fence(cpp::MemoryOrder::RELEASE);
       ptr.store(nullptr, cpp::MemoryOrder::RELAXED);
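The release path shown here mirrors the claim path: a single elected lane drops popcount(mask) references, and only the caller that brings the count to zero destroys the Slab, returns its memory over RPC, and resets the guard so the slot can be recreated. A self-contained sketch of that last-user teardown with std::atomic; the acq_rel ordering on the decrement is a choice made for this standalone version rather than the allocator's explicit-fence style, and the plain counter stands in for the RefCounter above:

#include <atomic>
#include <cstdlib>
#include <new>

// Illustrative stand-in for the guarded resource.
struct Widget {
  ~Widget() { /* release anything the object owns */ }
};

static std::atomic<Widget *> guarded{nullptr};
static std::atomic<unsigned> refs{0};

// Drops `n` references; the caller that brings the count to zero destroys the
// object, frees its memory, and clears the guard so it can be recreated.
void release(unsigned n) {
  // acq_rel: the release half publishes this caller's writes to the object;
  // the acquire half lets the last releaser observe every other user's writes
  // before it runs the destructor.
  if (refs.fetch_sub(n, std::memory_order_acq_rel) == n) {
    Widget *p = guarded.load(std::memory_order_relaxed);
    p->~Widget();
    std::free(p);
    // Make the destruction visible before the slot is observed as empty.
    std::atomic_thread_fence(std::memory_order_release);
    guarded.store(nullptr, std::memory_order_relaxed);
  }
}

int main() {
  // Simulate a guarded object with two outstanding users
  // (allocation-failure handling omitted for brevity).
  guarded.store(new (std::malloc(sizeof(Widget))) Widget(),
                std::memory_order_relaxed);
  refs.store(2, std::memory_order_relaxed);
  release(1); // first user leaves; the object survives
  release(1); // last user leaves; the object is destroyed and the slot cleared
}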
@@ -417,7 +417,7 @@ template <typename T> struct GuardPtr {
 };
 
 // The global array used to search for a valid slab to allocate from.
-static GuardPtr<Slab> slots[ARRAY_SIZE] = {};
+static GuardPtr slots[ARRAY_SIZE] = {};
 
 // Tries to find a slab in the table that can support the given chunk size.
 static Slab *find_slab(uint32_t chunk_size) {