[scudo] Clean up tests.

Modify the tests so that all clang warnings can be turned up to high.

Fix all places flagged by -Wconversion.
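For illustration, a minimal sketch (not part of the patch) of the pattern -Wconversion flags in these tests and the fix applied throughout the diffs below: std::rand() returns int, and mixing it with scudo's unsigned 64-bit size type is an implicit sign/width conversion, so the result is cast explicitly. The uptr alias and pickRandomSize helper here are stand-ins for this sketch only.

#include <cstdint>
#include <cstdlib>

using uptr = std::uintptr_t; // stand-in for scudo::uptr in this sketch

uptr pickRandomSize(uptr MaxSize) {
  // return std::rand() % MaxSize;                   // int -> unsigned: warns under -Wconversion
  return static_cast<uptr>(std::rand()) % MaxSize;   // explicit cast, no warning
}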

Fix a few unused variables not marked with UNUSED.
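The sketch below shows the shape of the annotation used in the diffs that follow. The UNUSED macro comes from scudo's internal_defs.h; it is redefined here only so the example is self-contained, on the assumption that it expands to the GCC/Clang unused attribute.

#include <cstddef>
#include <cstdint>

// Assumption for this sketch: scudo's UNUSED expands to __attribute__((unused)).
#define UNUSED __attribute__((unused))

// Parameters a callback intentionally ignores stay warning-free once they are
// annotated, without changing the callback's signature.
static void exampleCallback(uintptr_t Base, UNUSED size_t Size,
                            UNUSED void *Arg) {
  (void)Base; // only Base is touched in this sketch
}

int main() {
  exampleCallback(0x1000, 16, nullptr);
  return 0;
}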

For the memtag testing, only compile some tests for 64 bit since
compiling them on 32 bit leads to warnings/errors. All of the tests
are already skipped on 32 bit OSes, so this will not affect any
real tests.
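The guard has the shape shown in this simplified sketch (assuming a gtest harness; the real changes are in the memtag_test.cpp hunks further down): the test body is only compiled for 64-bit targets, matching the runtime skip that already exists.

#include <gtest/gtest.h>

TEST(MemtagSketch, GuardedBody) {
// Restrict compilation of the body to LP64 targets, mirroring the pattern
// applied to the memtag tests in this change.
#if defined(__LP64__)
  const unsigned long long Addr = 0x1000;
  const unsigned long long Tagged = Addr | (0x5ULL << 56);
  EXPECT_NE(Tagged, Addr);
#endif
}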

Reviewed By: Chia-hungDuan

Differential Revision: https://reviews.llvm.org/D155749
Author: Christopher Ferris
Date: 2023-07-19 13:26:38 -07:00
Parent: 2f34288b24
Commit: af41f79f40
8 changed files with 53 additions and 29 deletions


@@ -154,9 +154,10 @@ void ScudoCombinedTest<Config>::BasicTest(scudo::uptr SizeLog) {
   for (scudo::uptr AlignLog = MinAlignLog; AlignLog <= 16U; AlignLog++) {
     const scudo::uptr Align = 1U << AlignLog;
     for (scudo::sptr Delta = -32; Delta <= 32; Delta++) {
-      if (static_cast<scudo::sptr>(1U << SizeLog) + Delta < 0)
+      if ((1LL << SizeLog) + Delta < 0)
         continue;
-      const scudo::uptr Size = (1U << SizeLog) + Delta;
+      const scudo::uptr Size =
+          static_cast<scudo::uptr>((1LL << SizeLog) + Delta);
       void *P = Allocator->allocate(Size, Origin, Align);
       EXPECT_NE(P, nullptr);
       EXPECT_TRUE(Allocator->isOwned(P));
@@ -333,7 +334,8 @@ SCUDO_TYPED_TEST(ScudoCombinedDeathTest, ReallocateSame) {
   const char Marker = 0xab;
   memset(P, Marker, ReallocSize);
   for (scudo::sptr Delta = -32; Delta < 32; Delta += 8) {
-    const scudo::uptr NewSize = ReallocSize + Delta;
+    const scudo::uptr NewSize =
+        static_cast<scudo::uptr>(static_cast<scudo::sptr>(ReallocSize) + Delta);
     void *NewP = Allocator->reallocate(P, NewSize);
     EXPECT_EQ(NewP, P);
     for (scudo::uptr I = 0; I < ReallocSize - 32; I++)
@@ -355,11 +357,13 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, IterateOverChunks) {
   std::vector<void *> V;
   for (scudo::uptr I = 0; I < 64U; I++)
     V.push_back(Allocator->allocate(
-        rand() % (TypeParam::Primary::SizeClassMap::MaxSize / 2U), Origin));
+        static_cast<scudo::uptr>(std::rand()) %
+            (TypeParam::Primary::SizeClassMap::MaxSize / 2U),
+        Origin));
   Allocator->disable();
   Allocator->iterateOverChunks(
       0U, static_cast<scudo::uptr>(SCUDO_MMAP_RANGE_SIZE - 1),
-      [](uintptr_t Base, size_t Size, void *Arg) {
+      [](uintptr_t Base, UNUSED size_t Size, void *Arg) {
         std::vector<void *> *V = reinterpret_cast<std::vector<void *> *>(Arg);
         void *P = reinterpret_cast<void *>(Base);
         EXPECT_NE(std::find(V->begin(), V->end(), P), V->end());
@@ -444,7 +448,9 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, CacheDrain) NO_THREAD_SAFETY_ANALYSIS {
   std::vector<void *> V;
   for (scudo::uptr I = 0; I < 64U; I++)
     V.push_back(Allocator->allocate(
-        rand() % (TypeParam::Primary::SizeClassMap::MaxSize / 2U), Origin));
+        static_cast<scudo::uptr>(std::rand()) %
+            (TypeParam::Primary::SizeClassMap::MaxSize / 2U),
+        Origin));
   for (auto P : V)
     Allocator->deallocate(P, Origin);
 
@@ -463,7 +469,9 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, ForceCacheDrain) NO_THREAD_SAFETY_ANALYSIS {
   std::vector<void *> V;
   for (scudo::uptr I = 0; I < 64U; I++)
     V.push_back(Allocator->allocate(
-        rand() % (TypeParam::Primary::SizeClassMap::MaxSize / 2U), Origin));
+        static_cast<scudo::uptr>(std::rand()) %
+            (TypeParam::Primary::SizeClassMap::MaxSize / 2U),
+        Origin));
   for (auto P : V)
     Allocator->deallocate(P, Origin);
 
@@ -494,7 +502,7 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, ThreadedCombined) {
       }
       std::vector<std::pair<void *, scudo::uptr>> V;
      for (scudo::uptr I = 0; I < 256U; I++) {
-        const scudo::uptr Size = std::rand() % 4096U;
+        const scudo::uptr Size = static_cast<scudo::uptr>(std::rand()) % 4096U;
         void *P = Allocator->allocate(Size, Origin);
         // A region could have ran out of memory, resulting in a null P.
         if (P)
@@ -727,17 +735,17 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, ReallocateInPlaceStress) {
   // Regression test: make realloc-in-place happen at the very right end of a
   // mapped region.
-  constexpr int nPtrs = 10000;
-  for (int i = 1; i < 32; ++i) {
+  constexpr size_t nPtrs = 10000;
+  for (scudo::uptr i = 1; i < 32; ++i) {
     scudo::uptr Size = 16 * i - 1;
     std::vector<void *> Ptrs;
-    for (int i = 0; i < nPtrs; ++i) {
+    for (size_t i = 0; i < nPtrs; ++i) {
       void *P = Allocator->allocate(Size, Origin);
       P = Allocator->reallocate(P, Size + 1);
       Ptrs.push_back(P);
     }
-    for (int i = 0; i < nPtrs; ++i)
+    for (size_t i = 0; i < nPtrs; ++i)
       Allocator->deallocate(Ptrs[i], Origin);
   }
 }


@@ -76,12 +76,16 @@ TEST_F(MemtagTest, ArchMemoryTagGranuleSize) {
 }
 
 TEST_F(MemtagTest, ExtractTag) {
+  // The test is already skipped on anything other than 64 bit. But
+  // compiling on 32 bit leads to warnings/errors, so skip compiling the test.
+#if defined(__LP64__)
   uptr Tags = 0;
   // Try all value for the top byte and check the tags values are in the
   // expected range.
   for (u64 Top = 0; Top < 0x100; ++Top)
     Tags = Tags | (1u << extractTag(Addr | (Top << 56)));
   EXPECT_EQ(0xffffull, Tags);
+#endif
 }
 
 TEST_F(MemtagDeathTest, AddFixedTag) {
@@ -121,10 +125,14 @@ TEST_F(MemtagTest, SelectRandomTag) {
 }
 
 TEST_F(MemtagTest, SelectRandomTagWithMask) {
+  // The test is already skipped on anything other than 64 bit. But
+  // compiling on 32 bit leads to warnings/errors, so skip compiling the test.
+#if defined(__LP64__)
   for (uptr j = 0; j < 32; ++j) {
     for (uptr i = 0; i < 1000; ++i)
       EXPECT_NE(j, extractTag(selectRandomTag(Addr, 1ull << j)));
   }
+#endif
 }
 
 TEST_F(MemtagDeathTest, SKIP_NO_DEBUG(LoadStoreTagUnaligned)) {
@@ -158,6 +166,9 @@ TEST_F(MemtagDeathTest, SKIP_NO_DEBUG(StoreTagsUnaligned)) {
 }
 
 TEST_F(MemtagTest, StoreTags) {
+  // The test is already skipped on anything other than 64 bit. But
+  // compiling on 32 bit leads to warnings/errors, so skip compiling the test.
+#if defined(__LP64__)
   const uptr MaxTaggedSize = 4 * archMemoryTagGranuleSize();
   for (uptr Size = 0; Size <= MaxTaggedSize; ++Size) {
     uptr NoTagBegin = Addr + archMemoryTagGranuleSize();
@@ -186,6 +197,7 @@ TEST_F(MemtagTest, StoreTags) {
     // Reset tags without using StoreTags.
     MemMap.releasePagesToOS(Addr, BufferSize);
   }
+#endif
 }
 
 } // namespace scudo


@@ -253,7 +253,8 @@ SCUDO_TYPED_TEST(ScudoPrimaryTest, PrimaryIterate) {
   Cache.init(nullptr, Allocator.get());
   std::vector<std::pair<scudo::uptr, void *>> V;
   for (scudo::uptr I = 0; I < 64U; I++) {
-    const scudo::uptr Size = std::rand() % Primary::SizeClassMap::MaxSize;
+    const scudo::uptr Size =
+        static_cast<scudo::uptr>(std::rand()) % Primary::SizeClassMap::MaxSize;
     const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
     void *P = Cache.allocate(ClassId);
     V.push_back(std::make_pair(ClassId, P));
@@ -300,8 +301,8 @@ SCUDO_TYPED_TEST(ScudoPrimaryTest, PrimaryThreaded) {
           Cv.wait(Lock);
       }
       for (scudo::uptr I = 0; I < 256U; I++) {
-        const scudo::uptr Size =
-            std::rand() % Primary::SizeClassMap::MaxSize / 4;
+        const scudo::uptr Size = static_cast<scudo::uptr>(std::rand()) %
+                                 Primary::SizeClassMap::MaxSize / 4;
         const scudo::uptr ClassId =
             Primary::SizeClassMap::getClassIdBySize(Size);
         void *P = Cache.allocate(ClassId);


@@ -134,8 +134,9 @@ TEST(ScudoReleaseTest, FreePagesRangeTracker) {
     // Strip trailing '.'-pages before comparing the results as they are not
     // going to be reported to range_recorder anyway.
     const char *LastX = strrchr(TestCase, 'x');
-    std::string Expected(TestCase,
-                         LastX == nullptr ? 0 : (LastX - TestCase + 1));
+    std::string Expected(
+        TestCase,
+        LastX == nullptr ? 0U : static_cast<size_t>(LastX - TestCase + 1));
     EXPECT_STREQ(Expected.c_str(), Recorder.ReportedPages.c_str());
   }
 }


@@ -136,10 +136,10 @@ TEST_F(MapAllocatorTest, SecondaryCombinations) {
          AlignLog++) {
       const scudo::uptr Align = 1U << AlignLog;
       for (scudo::sptr Delta = -128; Delta <= 128; Delta += 8) {
-        if (static_cast<scudo::sptr>(1U << SizeLog) + Delta <= 0)
+        if ((1LL << SizeLog) + Delta <= 0)
           continue;
-        const scudo::uptr UserSize =
-            scudo::roundUp((1U << SizeLog) + Delta, MinAlign);
+        const scudo::uptr UserSize = scudo::roundUp(
+            static_cast<scudo::uptr>((1LL << SizeLog) + Delta), MinAlign);
         const scudo::uptr Size =
             HeaderSize + UserSize + (Align > MinAlign ? Align - HeaderSize : 0);
         void *P = Allocator->allocate(Options, Size, Align);
@@ -160,7 +160,8 @@ TEST_F(MapAllocatorTest, SecondaryIterate) {
   std::vector<void *> V;
   const scudo::uptr PageSize = scudo::getPageSizeCached();
   for (scudo::uptr I = 0; I < 32U; I++)
-    V.push_back(Allocator->allocate(Options, (std::rand() % 16) * PageSize));
+    V.push_back(Allocator->allocate(
+        Options, (static_cast<scudo::uptr>(std::rand()) % 16U) * PageSize));
   auto Lambda = [&V](scudo::uptr Block) {
     EXPECT_NE(std::find(V.begin(), V.end(), reinterpret_cast<void *>(Block)),
               V.end());
@@ -215,8 +216,9 @@ struct MapAllocatorWithReleaseTest : public MapAllocatorTest {
     }
     for (scudo::uptr I = 0; I < 128U; I++) {
       // Deallocate 75% of the blocks.
-      const bool Deallocate = (rand() & 3) != 0;
-      void *P = Allocator->allocate(Options, (std::rand() % 16) * PageSize);
+      const bool Deallocate = (std::rand() & 3) != 0;
+      void *P = Allocator->allocate(
+          Options, (static_cast<scudo::uptr>(std::rand()) % 16U) * PageSize);
       if (Deallocate)
        Allocator->deallocate(Options, P);
       else


@@ -38,7 +38,7 @@ public:
   void unmapTestOnly() { TSDRegistry.unmapTestOnly(this); }
   void initCache(CacheT *Cache) { *Cache = {}; }
-  void commitBack(scudo::TSD<MockAllocator> *TSD) {}
+  void commitBack(UNUSED scudo::TSD<MockAllocator> *TSD) {}
   TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
   void callPostInitCallback() {}


@@ -267,7 +267,7 @@ TEST(ScudoWrappersCTest, MallOpt) {
 
 TEST(ScudoWrappersCTest, OtherAlloc) {
 #if HAVE_PVALLOC
-  const size_t PageSize = sysconf(_SC_PAGESIZE);
+  const size_t PageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
 
   void *P = pvalloc(Size);
   EXPECT_NE(P, nullptr);
@@ -329,7 +329,7 @@ TEST(ScudoWrappersCTest, MallInfo2) {
 static uintptr_t BoundaryP;
 static size_t Count;
 
-static void callback(uintptr_t Base, size_t Size, void *Arg) {
+static void callback(uintptr_t Base, UNUSED size_t Size, UNUSED void *Arg) {
   if (scudo::archSupportsMemoryTagging()) {
     Base = scudo::untagPointer(Base);
     BoundaryP = scudo::untagPointer(BoundaryP);
@@ -343,7 +343,7 @@ static void callback(uintptr_t Base, size_t Size, void *Arg) {
 // aligned on a page, then run the malloc_iterate on both the pages that the
 // block is a boundary for. It must only be seen once by the callback function.
 TEST(ScudoWrappersCTest, MallocIterateBoundary) {
-  const size_t PageSize = sysconf(_SC_PAGESIZE);
+  const size_t PageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
 #if SCUDO_ANDROID
   // Android uses a 16 byte alignment for both 32 bit and 64 bit.
   const size_t BlockDelta = 16U;
@@ -461,7 +461,7 @@ static pthread_mutex_t Mutex;
 static pthread_cond_t Conditional = PTHREAD_COND_INITIALIZER;
 static bool Ready;
 
-static void *enableMalloc(void *Unused) {
+static void *enableMalloc(UNUSED void *Unused) {
   // Initialize the allocator for this thread.
   void *P = malloc(Size);
   EXPECT_NE(P, nullptr);


@@ -103,7 +103,7 @@ static void stressNew() {
       Cv.wait(Lock);
   }
   for (size_t I = 0; I < 256U; I++) {
-    const size_t N = std::rand() % 128U;
+    const size_t N = static_cast<size_t>(std::rand()) % 128U;
     uintptr_t *P = new uintptr_t[N];
     if (P) {
       memset(P, 0x42, sizeof(uintptr_t) * N);