Commit 8ce036d
[scudo] Add ScopedTSD to avoid releasing TSD manually (llvm#80061)
This makes TSD usage RAII style and avoids exposing the concrete TSD type. It also moves some thread-safety checking from static analysis to runtime, because of the static analysis's limitations: even where a code path is marked NO_THREAD_SAFETY_ANALYSIS, the `assertLocked()` runtime check still covers correctness.
1 parent dfdea4d commit 8ce036d
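
At every call site, the change replaces the manual `getTSDAndLock()` / `unlock()` pairing with a `ScopedTSD` object whose destructor releases the TSD. A minimal before/after sketch of the pattern, distilled from the diffs below:

```cpp
// Before: the caller tracks whether the TSD must be unlocked.
bool UnlockRequired;
auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
Block = TSD->getCache().allocate(ClassId);
if (UnlockRequired)
  TSD->unlock();

// After: RAII. ~ScopedTSD() unlocks when required, and operator->
// re-runs the assertLocked() runtime check on each access.
typename TSDRegistryT::ScopedTSD TSD(TSDRegistry);
Block = TSD->getCache().allocate(ClassId);
```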

File tree

5 files changed: 130 additions, 98 deletions

compiler-rt/lib/scudo/standalone/combined.h

Lines changed: 7 additions & 17 deletions
```diff
@@ -364,9 +364,7 @@ class Allocator {
     if (LIKELY(PrimaryT::canAllocate(NeededSize))) {
       ClassId = SizeClassMap::getClassIdBySize(NeededSize);
       DCHECK_NE(ClassId, 0U);
-      bool UnlockRequired;
-      auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
-      TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
+      typename TSDRegistryT::ScopedTSD TSD(TSDRegistry);
       Block = TSD->getCache().allocate(ClassId);
       // If the allocation failed, retry in each successively larger class until
       // it fits. If it fails to fit in the largest class, fallback to the
@@ -377,8 +375,6 @@ class Allocator {
         if (!Block)
           ClassId = 0;
       }
-      if (UnlockRequired)
-        TSD->unlock();
     }
     if (UNLIKELY(ClassId == 0)) {
       Block = Secondary.allocate(Options, Size, Alignment, &SecondaryBlockEnd,
@@ -1145,13 +1141,11 @@ class Allocator {
       void *BlockBegin = getBlockBegin(Ptr, Header);
       const uptr ClassId = Header->ClassId;
       if (LIKELY(ClassId)) {
-        bool UnlockRequired;
-        auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
-        TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
-        const bool CacheDrained =
-            TSD->getCache().deallocate(ClassId, BlockBegin);
-        if (UnlockRequired)
-          TSD->unlock();
+        bool CacheDrained;
+        {
+          typename TSDRegistryT::ScopedTSD TSD(TSDRegistry);
+          CacheDrained = TSD->getCache().deallocate(ClassId, BlockBegin);
+        }
         // When we have drained some blocks back to the Primary from TSD, that
         // implies that we may have the chance to release some pages as well.
         // Note that in order not to block other thread's accessing the TSD,
@@ -1165,13 +1159,9 @@
         Secondary.deallocate(Options, BlockBegin);
       }
     } else {
-      bool UnlockRequired;
-      auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
-      TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
+      typename TSDRegistryT::ScopedTSD TSD(TSDRegistry);
       Quarantine.put(&TSD->getQuarantineCache(),
                      QuarantineCallback(*this, TSD->getCache()), Ptr, Size);
-      if (UnlockRequired)
-        TSD->unlock();
     }
   }
```
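The extra braces in the deallocation hunk are deliberate: `CacheDrained` is captured inside an inner scope so that `~ScopedTSD()` releases the TSD before the page-release work runs, matching the comment about not blocking other threads' access to the TSD. The idiom, reduced to a sketch:

```cpp
bool CacheDrained;
{
  typename TSDRegistryT::ScopedTSD TSD(TSDRegistry);
  CacheDrained = TSD->getCache().deallocate(ClassId, BlockBegin);
} // ~ScopedTSD() runs here, so the work below never holds the TSD.
if (CacheDrained) {
  // ... attempt the page release (elided in this hunk) ...
}
```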

compiler-rt/lib/scudo/standalone/tests/combined_test.cpp

Lines changed: 24 additions & 14 deletions
```diff
@@ -229,11 +229,25 @@ struct TestConditionVariableConfig {
 #define SCUDO_TYPED_TEST(FIXTURE, NAME)                                        \
   template <class TypeParam>                                                   \
   struct FIXTURE##NAME : public FIXTURE<TypeParam> {                           \
+    using BaseT = FIXTURE<TypeParam>;                                          \
     void Run();                                                                \
   };                                                                           \
   SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                                    \
   template <class TypeParam> void FIXTURE##NAME<TypeParam>::Run()
 
+// Accessing `TSD->getCache()` requires `TSD::Mutex` which isn't easy to test
+// using thread-safety analysis. Alternatively, we verify the thread safety
+// through a runtime check in ScopedTSD and mark the test body with
+// NO_THREAD_SAFETY_ANALYSIS.
+#define SCUDO_TYPED_TEST_SKIP_THREAD_SAFETY(FIXTURE, NAME)                     \
+  template <class TypeParam>                                                   \
+  struct FIXTURE##NAME : public FIXTURE<TypeParam> {                           \
+    using BaseT = FIXTURE<TypeParam>;                                          \
+    void Run() NO_THREAD_SAFETY_ANALYSIS;                                      \
+  };                                                                           \
+  SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                                    \
+  template <class TypeParam> void FIXTURE##NAME<TypeParam>::Run()
+
 SCUDO_TYPED_TEST(ScudoCombinedTest, IsOwned) {
   auto *Allocator = this->Allocator.get();
   static scudo::u8 StaticBuffer[scudo::Chunk::getHeaderSize() + 1];
@@ -547,7 +561,8 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, Stats) {
   EXPECT_NE(Stats.find("Stats: Quarantine"), std::string::npos);
 }
 
-SCUDO_TYPED_TEST(ScudoCombinedTest, CacheDrain) NO_THREAD_SAFETY_ANALYSIS {
+SCUDO_TYPED_TEST_SKIP_THREAD_SAFETY(ScudoCombinedTest, CacheDrain) {
+  using AllocatorT = typename BaseT::AllocatorT;
   auto *Allocator = this->Allocator.get();
 
   std::vector<void *> V;
@@ -559,17 +574,15 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, CacheDrain) NO_THREAD_SAFETY_ANALYSIS {
   for (auto P : V)
     Allocator->deallocate(P, Origin);
 
-  bool UnlockRequired;
-  auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
-  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
+  typename AllocatorT::TSDRegistryT::ScopedTSD TSD(
+      *Allocator->getTSDRegistry());
   EXPECT_TRUE(!TSD->getCache().isEmpty());
   TSD->getCache().drain();
   EXPECT_TRUE(TSD->getCache().isEmpty());
-  if (UnlockRequired)
-    TSD->unlock();
 }
 
-SCUDO_TYPED_TEST(ScudoCombinedTest, ForceCacheDrain) NO_THREAD_SAFETY_ANALYSIS {
+SCUDO_TYPED_TEST_SKIP_THREAD_SAFETY(ScudoCombinedTest, ForceCacheDrain) {
+  using AllocatorT = typename BaseT::AllocatorT;
   auto *Allocator = this->Allocator.get();
 
   std::vector<void *> V;
@@ -584,14 +597,11 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, ForceCacheDrain) NO_THREAD_SAFETY_ANALYSIS {
   // `ForceAll` will also drain the caches.
   Allocator->releaseToOS(scudo::ReleaseToOS::ForceAll);
 
-  bool UnlockRequired;
-  auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
-  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
+  typename AllocatorT::TSDRegistryT::ScopedTSD TSD(
+      *Allocator->getTSDRegistry());
   EXPECT_TRUE(TSD->getCache().isEmpty());
   EXPECT_EQ(TSD->getQuarantineCache().getSize(), 0U);
   EXPECT_TRUE(Allocator->getQuarantine()->isEmpty());
-  if (UnlockRequired)
-    TSD->unlock();
 }
 
 SCUDO_TYPED_TEST(ScudoCombinedTest, ThreadedCombined) {
@@ -892,8 +902,8 @@ TEST(ScudoCombinedTest, BasicTrustyConfig) {
   }
 
   bool UnlockRequired;
-  auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
-  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
+  typename AllocatorT::TSDRegistryT::ScopedTSD TSD(
+      *Allocator->getTSDRegistry());
   TSD->getCache().drain();
 
   Allocator->releaseToOS(scudo::ReleaseToOS::Force);
```
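For test authors, `SCUDO_TYPED_TEST_SKIP_THREAD_SAFETY` is a drop-in replacement for `SCUDO_TYPED_TEST` whenever the body touches `TSD->getCache()`; the `BaseT` alias both macros now define is what lets the body recover `AllocatorT`. A hypothetical test body using the macro (the test name and assertion are illustrative, not part of this commit):

```cpp
SCUDO_TYPED_TEST_SKIP_THREAD_SAFETY(ScudoCombinedTest, CacheSmoke) {
  using AllocatorT = typename BaseT::AllocatorT;
  auto *Allocator = this->Allocator.get();
  typename AllocatorT::TSDRegistryT::ScopedTSD TSD(
      *Allocator->getTSDRegistry());
  TSD->getCache().drain(); // operator-> performs the runtime lock assertion.
  EXPECT_TRUE(TSD->getCache().isEmpty());
}
```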

compiler-rt/lib/scudo/standalone/tests/tsd_test.cpp

Lines changed: 25 additions & 30 deletions
```diff
@@ -17,6 +17,7 @@
 #include <mutex>
 #include <set>
 #include <thread>
+#include <type_traits>
 
 // We mock out an allocator with a TSD registry, mostly using empty stubs. The
 // cache contains a single volatile uptr, to be able to test that several
@@ -86,7 +87,8 @@ TEST(ScudoTSDTest, TSDRegistryInit) {
   EXPECT_TRUE(Allocator->isInitialized());
 }
 
-template <class AllocatorT> static void testRegistry() {
+template <class AllocatorT>
+static void testRegistry() NO_THREAD_SAFETY_ANALYSIS {
   auto Deleter = [](AllocatorT *A) {
     A->unmapTestOnly();
     delete A;
@@ -99,22 +101,17 @@ template <class AllocatorT> static void testRegistry() {
   Registry->initThreadMaybe(Allocator.get(), /*MinimalInit=*/true);
   EXPECT_TRUE(Allocator->isInitialized());
 
-  bool UnlockRequired;
-  auto TSD = Registry->getTSDAndLock(&UnlockRequired);
-  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
-  EXPECT_NE(TSD, nullptr);
-  EXPECT_EQ(TSD->getCache().Canary, 0U);
-  if (UnlockRequired)
-    TSD->unlock();
+  {
+    typename AllocatorT::TSDRegistryT::ScopedTSD TSD(*Registry);
+    EXPECT_EQ(TSD->getCache().Canary, 0U);
+  }
 
   Registry->initThreadMaybe(Allocator.get(), /*MinimalInit=*/false);
-  TSD = Registry->getTSDAndLock(&UnlockRequired);
-  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
-  EXPECT_NE(TSD, nullptr);
-  EXPECT_EQ(TSD->getCache().Canary, 0U);
-  memset(&TSD->getCache(), 0x42, sizeof(TSD->getCache()));
-  if (UnlockRequired)
-    TSD->unlock();
+  {
+    typename AllocatorT::TSDRegistryT::ScopedTSD TSD(*Registry);
+    EXPECT_EQ(TSD->getCache().Canary, 0U);
+    memset(&TSD->getCache(), 0x42, sizeof(TSD->getCache()));
+  }
 }
 
 TEST(ScudoTSDTest, TSDRegistryBasic) {
@@ -129,31 +126,33 @@ static std::mutex Mutex;
 static std::condition_variable Cv;
 static bool Ready;
 
-template <typename AllocatorT> static void stressCache(AllocatorT *Allocator) {
+// Accessing `TSD->getCache()` requires `TSD::Mutex` which isn't easy to test
+// using thread-safety analysis. Alternatively, we verify the thread safety
+// through a runtime check in ScopedTSD and mark the test body with
+// NO_THREAD_SAFETY_ANALYSIS.
+template <typename AllocatorT>
+static void stressCache(AllocatorT *Allocator) NO_THREAD_SAFETY_ANALYSIS {
   auto Registry = Allocator->getTSDRegistry();
   {
     std::unique_lock<std::mutex> Lock(Mutex);
     while (!Ready)
       Cv.wait(Lock);
   }
   Registry->initThreadMaybe(Allocator, /*MinimalInit=*/false);
-  bool UnlockRequired;
-  auto TSD = Registry->getTSDAndLock(&UnlockRequired);
-  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
-  EXPECT_NE(TSD, nullptr);
+  typename AllocatorT::TSDRegistryT::ScopedTSD TSD(*Registry);
   // For an exclusive TSD, the cache should be empty. We cannot guarantee the
   // same for a shared TSD.
-  if (!UnlockRequired)
+  if (std::is_same<typename AllocatorT::TSDRegistryT,
+                   scudo::TSDRegistryExT<AllocatorT>>()) {
     EXPECT_EQ(TSD->getCache().Canary, 0U);
+  }
   // Transform the thread id to a uptr to use it as canary.
   const scudo::uptr Canary = static_cast<scudo::uptr>(
       std::hash<std::thread::id>{}(std::this_thread::get_id()));
   TSD->getCache().Canary = Canary;
   // Loop a few times to make sure that a concurrent thread isn't modifying it.
   for (scudo::uptr I = 0; I < 4096U; I++)
     EXPECT_EQ(TSD->getCache().Canary, Canary);
-  if (UnlockRequired)
-    TSD->unlock();
 }
 
 template <class AllocatorT> static void testRegistryThreaded() {
@@ -195,14 +194,10 @@ static void stressSharedRegistry(MockAllocator<SharedCaches> *Allocator) {
     Cv.wait(Lock);
   }
   Registry->initThreadMaybe(Allocator, /*MinimalInit=*/false);
-  bool UnlockRequired;
   for (scudo::uptr I = 0; I < 4096U; I++) {
-    auto TSD = Registry->getTSDAndLock(&UnlockRequired);
-    TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
-    EXPECT_NE(TSD, nullptr);
-    Set.insert(reinterpret_cast<void *>(TSD));
-    if (UnlockRequired)
-      TSD->unlock();
+    typename MockAllocator<SharedCaches>::TSDRegistryT::ScopedTSD TSD(
+        *Registry);
+    Set.insert(reinterpret_cast<void *>(&*TSD));
   }
   {
     std::unique_lock<std::mutex> Lock(Mutex);
```
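Two details in these tests follow directly from hiding `UnlockRequired` inside `ScopedTSD`: the exclusive-versus-shared distinction is now made on the registry type via `std::is_same`, and because `TSD` is a wrapper object rather than a raw pointer, taking the underlying TSD's address must go through `operator*`:

```cpp
// ScopedTSD is not a pointer: &*TSD dereferences via operator*() to the
// underlying TSD<Allocator> object and then takes its address.
Set.insert(reinterpret_cast<void *>(&*TSD));
```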

compiler-rt/lib/scudo/standalone/tsd_exclusive.h

Lines changed: 37 additions & 17 deletions
```diff
@@ -27,6 +27,31 @@ struct ThreadState {
 template <class Allocator> void teardownThread(void *Ptr);
 
 template <class Allocator> struct TSDRegistryExT {
+  using ThisT = TSDRegistryExT<Allocator>;
+
+  struct ScopedTSD {
+    ScopedTSD(ThisT &TSDRegistry) {
+      CurrentTSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
+      DCHECK_NE(CurrentTSD, nullptr);
+    }
+
+    ~ScopedTSD() {
+      if (UNLIKELY(UnlockRequired))
+        CurrentTSD->unlock();
+    }
+
+    TSD<Allocator> &operator*() { return *CurrentTSD; }
+
+    TSD<Allocator> *operator->() {
+      CurrentTSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
+      return CurrentTSD;
+    }
+
+  private:
+    TSD<Allocator> *CurrentTSD;
+    bool UnlockRequired;
+  };
+
   void init(Allocator *Instance) REQUIRES(Mutex) {
     DCHECK(!Initialized);
     Instance->init();
@@ -74,23 +99,6 @@ template <class Allocator> struct TSDRegistryExT {
     initThread(Instance, MinimalInit);
   }
 
-  // TODO(chiahungduan): Consider removing the argument `UnlockRequired` by
-  // embedding the logic into TSD or always locking the TSD. It will enable us
-  // to properly mark thread annotation here and adding proper runtime
-  // assertions in the member functions of TSD. For example, assert the lock is
-  // acquired before calling TSD::commitBack().
-  ALWAYS_INLINE TSD<Allocator> *
-  getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
-    if (LIKELY(State.InitState == ThreadState::Initialized &&
-               !atomic_load(&Disabled, memory_order_acquire))) {
-      *UnlockRequired = false;
-      return &ThreadTSD;
-    }
-    FallbackTSD.lock();
-    *UnlockRequired = true;
-    return &FallbackTSD;
-  }
-
   // To disable the exclusive TSD registry, we effectively lock the fallback TSD
   // and force all threads to attempt to use it instead of their local one.
   void disable() NO_THREAD_SAFETY_ANALYSIS {
@@ -123,6 +131,18 @@ template <class Allocator> struct TSDRegistryExT {
   }
 
 private:
+  ALWAYS_INLINE TSD<Allocator> *
+  getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
+    if (LIKELY(State.InitState == ThreadState::Initialized &&
+               !atomic_load(&Disabled, memory_order_acquire))) {
+      *UnlockRequired = false;
+      return &ThreadTSD;
+    }
+    FallbackTSD.lock();
+    *UnlockRequired = true;
+    return &FallbackTSD;
+  }
+
   // Using minimal initialization allows for global initialization while keeping
   // the thread specific structure untouched. The fallback structure will be
   // used instead.
```
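With `getTSDAndLock()` moved unchanged into the private section, `ScopedTSD` becomes the only way for outside code to reach a TSD. A minimal usage sketch under that model (registry spelling illustrative):

```cpp
{
  typename TSDRegistryT::ScopedTSD TSD(TSDRegistry);
  // Every operator-> asserts at runtime that the TSD's lock is held.
  TSD->getCache().drain();
} // The destructor unlocks the fallback TSD if that is the one taken.
```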
