[scudo] Fix the use of ASSERT_CAPABILITY in TSD (#68273)
getCache()/getQuarantineCache() return a reference to a variable guarded by a mutex. After #67776, thread-safety analysis checks that the lock is held when a variable is returned by reference. ASSERT_CAPABILITY only tells the analyzer that the lock will be held *after* calling the function; it does not assert that the lock is held *before* the call. In this patch, we switch to REQUIRES() and use assertLocked() to mark the code paths, and remove the misused ASSERT_CAPABILITY. Fixes #67795, #67796.
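To make the failure mode concrete, here is a minimal sketch (not scudo source; the macro spellings follow Clang's thread-safety-analysis documentation, and the Cache/demo names are made up for illustration). ASSERT_CAPABILITY() only tells the analyzer the capability is held *after* the call returns, so an unlocked call site passes silently; REQUIRES() makes the caller prove the lock is held *before* the call, which is what returning a reference to a guarded member needs:

  // Hypothetical repro; build with: clang++ -Wthread-safety -c demo.cpp
  #define CAPABILITY(x) __attribute__((capability(x)))
  #define GUARDED_BY(x) __attribute__((guarded_by(x)))
  #define ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__)))
  #define RELEASE(...) __attribute__((release_capability(__VA_ARGS__)))
  #define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))
  #define ASSERT_CAPABILITY(x) __attribute__((assert_capability(x)))

  class CAPABILITY("mutex") Mutex {
  public:
    void lock() ACQUIRE() {}
    void unlock() RELEASE() {}
  };

  struct Cache {
    Mutex M;
    int Data GUARDED_BY(M);

    // ASSERT_CAPABILITY: the analyzer assumes M is held after this returns;
    // it never checks the caller, so a lockless call site goes undiagnosed.
    int &getAsserted() ASSERT_CAPABILITY(M) { return Data; }

    // REQUIRES: the caller must hold M before the call, or -Wthread-safety
    // reports the violation at the call site.
    int &getRequired() REQUIRES(M) { return Data; }
  };

  void demo(Cache &C) {
    C.M.lock();
    C.getRequired(); // OK: M is held here
    C.M.unlock();
    // C.getRequired(); // would warn: requires holding mutex 'C.M'
    C.getAsserted(); // accepted silently even though M is not held
  }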
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -245,12 +245,14 @@ public:
   // - unlinking the local stats from the global ones (destroying the cache does
   //   the last two items).
   void commitBack(TSD<ThisT> *TSD) {
+    TSD->assertLocked(/*BypassCheck=*/true);
     Quarantine.drain(&TSD->getQuarantineCache(),
                      QuarantineCallback(*this, TSD->getCache()));
     TSD->getCache().destroy(&Stats);
   }
 
   void drainCache(TSD<ThisT> *TSD) {
+    TSD->assertLocked(/*BypassCheck=*/true);
     Quarantine.drainAndRecycle(&TSD->getQuarantineCache(),
                                QuarantineCallback(*this, TSD->getCache()));
     TSD->getCache().drain();
@@ -363,6 +365,7 @@ public:
       DCHECK_NE(ClassId, 0U);
       bool UnlockRequired;
       auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
+      TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
       Block = TSD->getCache().allocate(ClassId);
       // If the allocation failed, the most likely reason with a 32-bit primary
       // is the region being full. In that event, retry in each successively
@@ -1147,6 +1150,7 @@ private:
     if (LIKELY(ClassId)) {
       bool UnlockRequired;
       auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
+      TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
       const bool CacheDrained =
           TSD->getCache().deallocate(ClassId, BlockBegin);
       if (UnlockRequired)
@@ -1166,6 +1170,7 @@ private:
     } else {
       bool UnlockRequired;
       auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
+      TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
       Quarantine.put(&TSD->getQuarantineCache(),
                      QuarantineCallback(*this, TSD->getCache()), Ptr, Size);
       if (UnlockRequired)
--- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -512,6 +512,7 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, CacheDrain) NO_THREAD_SAFETY_ANALYSIS {
 
   bool UnlockRequired;
   auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
+  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
   EXPECT_TRUE(!TSD->getCache().isEmpty());
   TSD->getCache().drain();
   EXPECT_TRUE(TSD->getCache().isEmpty());
@@ -536,6 +537,7 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, ForceCacheDrain) NO_THREAD_SAFETY_ANALYSIS {
 
   bool UnlockRequired;
   auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
+  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
   EXPECT_TRUE(TSD->getCache().isEmpty());
   EXPECT_EQ(TSD->getQuarantineCache().getSize(), 0U);
   EXPECT_TRUE(Allocator->getQuarantine()->isEmpty());
@@ -842,6 +844,7 @@ TEST(ScudoCombinedTest, BasicTrustyConfig) {
 
   bool UnlockRequired;
   auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
+  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
   TSD->getCache().drain();
 
   Allocator->releaseToOS(scudo::ReleaseToOS::Force);
--- a/compiler-rt/lib/scudo/standalone/tests/tsd_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/tsd_test.cpp
@@ -101,6 +101,7 @@ template <class AllocatorT> static void testRegistry() {
 
   bool UnlockRequired;
   auto TSD = Registry->getTSDAndLock(&UnlockRequired);
+  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
   EXPECT_NE(TSD, nullptr);
   EXPECT_EQ(TSD->getCache().Canary, 0U);
   if (UnlockRequired)
@@ -108,6 +109,7 @@ template <class AllocatorT> static void testRegistry() {
 
   Registry->initThreadMaybe(Allocator.get(), /*MinimalInit=*/false);
   TSD = Registry->getTSDAndLock(&UnlockRequired);
+  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
   EXPECT_NE(TSD, nullptr);
   EXPECT_EQ(TSD->getCache().Canary, 0U);
   memset(&TSD->getCache(), 0x42, sizeof(TSD->getCache()));
@@ -137,6 +139,7 @@ template <typename AllocatorT> static void stressCache(AllocatorT *Allocator) {
   Registry->initThreadMaybe(Allocator, /*MinimalInit=*/false);
   bool UnlockRequired;
   auto TSD = Registry->getTSDAndLock(&UnlockRequired);
+  TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
   EXPECT_NE(TSD, nullptr);
   // For an exclusive TSD, the cache should be empty. We cannot guarantee the
   // same for a shared TSD.
@@ -195,6 +198,7 @@ static void stressSharedRegistry(MockAllocator<SharedCaches> *Allocator) {
   bool UnlockRequired;
   for (scudo::uptr I = 0; I < 4096U; I++) {
     auto TSD = Registry->getTSDAndLock(&UnlockRequired);
+    TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
     EXPECT_NE(TSD, nullptr);
     Set.insert(reinterpret_cast<void *>(TSD));
     if (UnlockRequired)
--- a/compiler-rt/lib/scudo/standalone/tsd.h
+++ b/compiler-rt/lib/scudo/standalone/tsd.h
@@ -53,8 +53,14 @@ template <class Allocator> struct alignas(SCUDO_CACHE_LINE_SIZE) TSD {
   inline void unlock() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }
   inline uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
 
-  void commitBack(Allocator *Instance) ASSERT_CAPABILITY(Mutex) {
-    Instance->commitBack(this);
-  }
+  void commitBack(Allocator *Instance) { Instance->commitBack(this); }
+
+  // As the comments attached to `getCache()`, the TSD doesn't always need to be
+  // locked. In that case, we would only skip the check before we have all TSDs
+  // locked in all paths.
+  void assertLocked(bool BypassCheck) ASSERT_CAPABILITY(Mutex) {
+    if (SCUDO_DEBUG && !BypassCheck)
+      Mutex.assertHeld();
+  }
 
   // Ideally, we may want to assert that all the operations on
@@ -66,11 +72,8 @@ template <class Allocator> struct alignas(SCUDO_CACHE_LINE_SIZE) TSD {
   // TODO(chiahungduan): Ideally, we want to do `Mutex.assertHeld` but acquiring
   // TSD doesn't always require holding the lock. Add this assertion while the
   // lock is always acquired.
-  typename Allocator::CacheT &getCache() ASSERT_CAPABILITY(Mutex) {
-    return Cache;
-  }
-  typename Allocator::QuarantineCacheT &getQuarantineCache()
-      ASSERT_CAPABILITY(Mutex) {
-    return QuarantineCache;
-  }
+  typename Allocator::CacheT &getCache() REQUIRES(Mutex) { return Cache; }
+  typename Allocator::QuarantineCacheT &getQuarantineCache() REQUIRES(Mutex) {
+    return QuarantineCache;
+  }
 
--- a/compiler-rt/lib/scudo/standalone/tsd_shared.h
+++ b/compiler-rt/lib/scudo/standalone/tsd_shared.h
@@ -120,6 +120,11 @@ struct TSDRegistrySharedT {
                 TSDsArraySize);
   for (uptr I = 0; I < NumberOfTSDs; ++I) {
     TSDs[I].lock();
+    // Theoretically, we want to mark TSD::lock()/TSD::unlock() with proper
+    // thread annotations. However, given the TSD is only locked on shared
+    // path, do the assertion in a separate path to avoid confusing the
+    // analyzer.
+    TSDs[I].assertLocked(/*BypassCheck=*/true);
     Str->append("  Shared TSD[%zu]:\n", I);
     TSDs[I].getCache().getStats(Str);
     TSDs[I].unlock();
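For the scudo side, the sketch below (reusing the Mutex class and annotation macros from the earlier example; this TSD is a simplified stand-in, not the real struct) shows how the pieces added by this patch fit together: assertLocked() carries ASSERT_CAPABILITY, so once it has been called the REQUIRES() on the getters is satisfied, while BypassCheck skips the runtime check on paths, such as an exclusive thread-local TSD, where the lock is legitimately not taken:

  // Simplified stand-in for scudo's TSD; names mirror the diff above.
  struct TSD {
    Mutex M;
    int Cache GUARDED_BY(M);

    // Debug-only runtime check paired with ASSERT_CAPABILITY: the analyzer
    // treats M as held from here on, even on deliberately lockless paths.
    void assertLocked(bool BypassCheck) ASSERT_CAPABILITY(M) {
      if (!BypassCheck) {
        // The real code runs Mutex::assertHeld() here under SCUDO_DEBUG.
      }
    }

    int &getCache() REQUIRES(M) { return Cache; }
  };

  void useTSD(TSD *T, bool UnlockRequired) {
    // An exclusive (thread-local) TSD comes back with UnlockRequired == false
    // and its mutex not actually held, so the runtime check is bypassed.
    T->assertLocked(/*BypassCheck=*/!UnlockRequired);
    T->getCache() += 1; // REQUIRES(M) satisfied by the asserted capability
  }

Note that TSD::unlock() stays NO_THREAD_SAFETY_ANALYSIS in the real code, so the conditional "if (UnlockRequired) TSD->unlock();" at the call sites does not confuse the analyzer.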