[scudo] Introduce Chunk::getHeaderSize
Summary:
Instead of using `AlignedChunkHeaderSize`, introduce a `constexpr` function
`getHeaderSize` in the `Chunk` namespace. Switch `RoundUpTo` to a `constexpr`
as well (so we can use it in `constexpr` declarations). Mark a few variables
in the areas touched as `const`. Overall this has no functional change, and
is mostly to make things a bit more consistent.

Reviewers: alekseyshl

Reviewed By: alekseyshl

Subscribers: delcypher, #sanitizers, llvm-commits

Differential Revision: https://reviews.llvm.org/D43772

llvm-svn: 326206
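The core of the change is easiest to see in isolation. Below is a minimal, self-contained sketch of the pattern the patch introduces; the `PackedHeader` type and the alignment constants are stand-ins for the real scudo definitions, not the actual values.

#include <cstdint>

using uptr = uintptr_t;

// Stand-ins for the real scudo definitions.
typedef uint64_t PackedHeader;
constexpr uptr MinAlignmentLog = 4;
constexpr uptr MinAlignment = 1 << MinAlignmentLog;

// A constexpr rounding helper can be used in constant expressions.
constexpr uptr RoundUpTo(uptr Size, uptr Boundary) {
  return (Size + Boundary - 1) & ~(Boundary - 1);
}

namespace Chunk {
constexpr uptr getHeaderSize() {
  return RoundUpTo(sizeof(PackedHeader), MinAlignment);
}
}  // namespace Chunk

// Because getHeaderSize() is constexpr, it can be checked at compile time
// and used to initialize other constexpr values.
static_assert(Chunk::getHeaderSize() % MinAlignment == 0,
              "header size must be a multiple of the minimum alignment");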
scudo_allocator.cpp:

@@ -69,18 +69,17 @@ namespace Chunk {
   // prevent this, we work with a local copy of the header.
   static INLINE void *getBackendPtr(const void *Ptr, UnpackedHeader *Header) {
     return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
-                                    AlignedChunkHeaderSize -
-                                    (Header->Offset << MinAlignmentLog));
+        getHeaderSize() - (Header->Offset << MinAlignmentLog));
   }
 
   static INLINE AtomicPackedHeader *getAtomicHeader(void *Ptr) {
     return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) -
-                                                  AlignedChunkHeaderSize);
+        getHeaderSize());
   }
   static INLINE
   const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
     return reinterpret_cast<const AtomicPackedHeader *>(
-        reinterpret_cast<uptr>(Ptr) - AlignedChunkHeaderSize);
+        reinterpret_cast<uptr>(Ptr) - getHeaderSize());
   }
 
   static INLINE bool isAligned(const void *Ptr) {
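For context, the layout these helpers assume: the chunk header immediately precedes the user pointer, and a Primary-backed chunk may additionally be shifted `Offset << MinAlignmentLog` bytes into the backend allocation. A small round-trip with made-up numbers (the constants are illustrative, not scudo's actual values):

#include <cassert>
#include <cstdint>

using uptr = uintptr_t;
constexpr uptr MinAlignmentLog = 4;
constexpr uptr HeaderSize = 16;  // stands in for Chunk::getHeaderSize()

int main() {
  const uptr BackendPtr = 0x1000;  // start of the backend allocation
  const uptr Offset = 2;           // stored in the header, in MinAlignment units
  // The user pointer lies past the alignment offset and the header.
  const uptr UserPtr = BackendPtr + (Offset << MinAlignmentLog) + HeaderSize;
  // getBackendPtr() performs the inverse computation:
  const uptr Recovered = UserPtr - HeaderSize - (Offset << MinAlignmentLog);
  assert(Recovered == BackendPtr);
  return 0;
}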
@@ -92,9 +91,8 @@ namespace Chunk {
   static INLINE uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) {
     const uptr Size = getBackendAllocator().getActuallyAllocatedSize(
         getBackendPtr(Ptr, Header), Header->ClassId);
-    if (Size == 0)
-      return 0;
-    return Size - AlignedChunkHeaderSize - (Header->Offset << MinAlignmentLog);
+    DCHECK_NE(Size, 0);
+    return Size - getHeaderSize() - (Header->Offset << MinAlignmentLog);
   }
 
   // Compute the checksum of the chunk pointer and its header.
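Note the `if (Size == 0) return 0;` early-return becomes a `DCHECK_NE`: the zero case is treated as impossible, and the check only fires in debug builds. A simplified sketch of the debug-check idiom (the real macro lives in sanitizer_common; this is an approximation, not its actual definition):

#include <cstdint>
#include <cstdio>
#include <cstdlib>

using uptr = uintptr_t;

// Simplified debug-only check: compiled out entirely when NDEBUG is set.
#ifndef NDEBUG
#define DCHECK_NE(A, B)                                       \
  do {                                                        \
    if ((A) == (B)) {                                         \
      fprintf(stderr, "DCHECK failed: %s != %s\n", #A, #B);   \
      abort();                                                \
    }                                                         \
  } while (0)
#else
#define DCHECK_NE(A, B) do { } while (0)
#endif

int main() {
  const uptr Size = 64;
  DCHECK_NE(Size, 0);  // no-op in release builds, aborts on failure in debug
  return 0;
}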
@@ -251,7 +249,7 @@ struct ScudoAllocator {
     const uptr MaxPrimaryAlignment =
         1 << MostSignificantSetBitIndex(SizeClassMap::kMaxSize - MinAlignment);
     const uptr MaxOffset =
-        (MaxPrimaryAlignment - AlignedChunkHeaderSize) >> MinAlignmentLog;
+        (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
     Header.Offset = MaxOffset;
     if (Header.Offset != MaxOffset) {
       dieWithMessage("ERROR: the maximum possible offset doesn't fit in the "
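The `Header.Offset = MaxOffset; if (Header.Offset != MaxOffset)` pair is a width check: `Offset` is a bitfield, so assigning the largest possible offset and reading it back detects truncation. A standalone sketch with a hypothetical 16-bit field (the real field width differs):

#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;

struct Header {
  uptr Offset : 16;  // hypothetical width, for illustration only
};

int main() {
  const uptr MaxOffset = uptr(1) << 20;  // deliberately too wide for 16 bits
  Header H = {};
  H.Offset = MaxOffset;  // unsigned bitfield assignment wraps modulo 2^16
  if (H.Offset != MaxOffset)
    printf("the maximum possible offset doesn't fit in the header\n");
  return 0;
}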
@@ -368,9 +366,10 @@ struct ScudoAllocator {
     if (UNLIKELY(Size == 0))
       Size = 1;
 
-    uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize;
-    uptr AlignedSize = (Alignment > MinAlignment) ?
-        NeededSize + (Alignment - AlignedChunkHeaderSize) : NeededSize;
+    const uptr NeededSize = RoundUpTo(Size, MinAlignment) +
+        Chunk::getHeaderSize();
+    const uptr AlignedSize = (Alignment > MinAlignment) ?
+        NeededSize + (Alignment - Chunk::getHeaderSize()) : NeededSize;
     if (UNLIKELY(AlignedSize >= MaxAllowedMallocSize))
       return FailureHandler::OnBadRequest();
 
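Worked numbers for the size computation above, assuming an illustrative 16-byte minimum alignment and a 16-byte aligned header: a 100-byte request with 64-byte alignment needs 112 + 16 = 128 bytes, plus 48 bytes of slack so a suitably aligned user pointer preceded by its header can always be carved out of the backend allocation.

#include <cassert>
#include <cstdint>

using uptr = uintptr_t;
constexpr uptr MinAlignment = 16;
constexpr uptr HeaderSize = 16;  // stands in for Chunk::getHeaderSize()

constexpr uptr RoundUpTo(uptr Size, uptr Boundary) {
  return (Size + Boundary - 1) & ~(Boundary - 1);
}

int main() {
  const uptr Size = 100, Alignment = 64;
  const uptr NeededSize = RoundUpTo(Size, MinAlignment) + HeaderSize;
  // Extra slack covers the worst-case distance to the next aligned address.
  const uptr AlignedSize = (Alignment > MinAlignment)
                               ? NeededSize + (Alignment - HeaderSize)
                               : NeededSize;
  assert(NeededSize == 128);   // RoundUpTo(100, 16) + 16
  assert(AlignedSize == 176);  // 128 + (64 - 16)
  return 0;
}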
@@ -403,7 +402,7 @@ struct ScudoAllocator {
         BackendAllocator.getActuallyAllocatedSize(BackendPtr, ClassId));
 
     UnpackedHeader Header = {};
-    uptr UserPtr = reinterpret_cast<uptr>(BackendPtr) + AlignedChunkHeaderSize;
+    uptr UserPtr = reinterpret_cast<uptr>(BackendPtr) + Chunk::getHeaderSize();
     if (UNLIKELY(!IsAligned(UserPtr, Alignment))) {
       // Since the Secondary takes care of alignment, a non-aligned pointer
       // means it is from the Primary. It is also the only case where the offset
@@ -505,7 +504,7 @@ struct ScudoAllocator {
         }
       }
     }
-    uptr Size = Header.ClassId ? Header.SizeOrUnusedBytes :
+    const uptr Size = Header.ClassId ? Header.SizeOrUnusedBytes :
         Chunk::getUsableSize(Ptr, &Header) - Header.SizeOrUnusedBytes;
     if (DeleteSizeMismatch) {
       if (DeleteSize && DeleteSize != Size) {
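As the ternary shows, `SizeOrUnusedBytes` does double duty: for a Primary-backed chunk (`ClassId != 0`) it stores the requested size directly; for a Secondary-backed chunk it stores the unused trailing bytes, so the requested size is recovered as usable size minus that remainder. With made-up numbers:

#include <cassert>
#include <cstdint>

using uptr = uintptr_t;

int main() {
  // Primary-backed chunk: the field holds the requested size itself.
  uptr ClassId = 3, SizeOrUnusedBytes = 100, UsableSize = 112;
  uptr Size = ClassId ? SizeOrUnusedBytes : UsableSize - SizeOrUnusedBytes;
  assert(Size == 100);

  // Secondary-backed chunk (ClassId == 0): the field holds unused bytes.
  ClassId = 0;
  UsableSize = 8192;  // illustrative
  SizeOrUnusedBytes = 92;
  Size = ClassId ? SizeOrUnusedBytes : UsableSize - SizeOrUnusedBytes;
  assert(Size == 8100);
  return 0;
}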
scudo_allocator.h:

@@ -59,9 +59,17 @@ const uptr MaxAlignmentLog = 24; // 16 MB
 const uptr MinAlignment = 1 << MinAlignmentLog;
 const uptr MaxAlignment = 1 << MaxAlignmentLog;
 
-const uptr ChunkHeaderSize = sizeof(PackedHeader);
-const uptr AlignedChunkHeaderSize =
-    (ChunkHeaderSize + MinAlignment - 1) & ~(MinAlignment - 1);
+// constexpr version of __sanitizer::RoundUp without the extraneous CHECK.
+// This way we can use it in constexpr variables and functions declarations.
+constexpr uptr RoundUpTo(uptr Size, uptr Boundary) {
+  return (Size + Boundary - 1) & ~(Boundary - 1);
+}
+
+namespace Chunk {
+  constexpr uptr getHeaderSize() {
+    return RoundUpTo(sizeof(PackedHeader), MinAlignment);
+  }
+}
 
 #if SANITIZER_CAN_USE_ALLOCATOR64
 const uptr AllocatorSpace = ~0ULL;
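Why `constexpr` rather than the old `INLINE` helper matters: the result of a plain inline function is not a constant expression, so it cannot initialize `constexpr` variables such as the `static constexpr` members updated later in this patch. A sketch of the distinction (names are illustrative):

#include <cstdint>

using uptr = uintptr_t;

inline uptr roundUpRuntime(uptr S, uptr B) { return (S + B - 1) & ~(B - 1); }
constexpr uptr roundUpConstexpr(uptr S, uptr B) { return (S + B - 1) & ~(B - 1); }

// constexpr uptr A = roundUpRuntime(24, 16);  // error: not a constant expression
constexpr uptr B = roundUpConstexpr(24, 16);   // OK: evaluated at compile time
static_assert(B == 32, "24 rounds up to the next 16-byte boundary");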
@@ -97,11 +105,6 @@ struct AP32 {
 typedef SizeClassAllocator32<AP32> PrimaryAllocator;
 #endif  // SANITIZER_CAN_USE_ALLOCATOR64
 
-// __sanitizer::RoundUp has a CHECK that is extraneous for us. Use our own.
-INLINE uptr RoundUpTo(uptr Size, uptr Boundary) {
-  return (Size + Boundary - 1) & ~(Boundary - 1);
-}
-
 #include "scudo_allocator_secondary.h"
 #include "scudo_allocator_combined.h"
 
scudo_allocator_secondary.h:

@@ -28,7 +28,7 @@ class ScudoLargeMmapAllocator {
   }
 
   void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
-    const uptr UserSize = Size - AlignedChunkHeaderSize;
+    const uptr UserSize = Size - Chunk::getHeaderSize();
     // The Scudo frontend prevents us from allocating more than
     // MaxAllowedMallocSize, so integer overflow checks would be superfluous.
     uptr MapSize = Size + AlignedReservedAddressRangeSize;
@@ -80,7 +80,7 @@ class ScudoLargeMmapAllocator {
     // Actually mmap the memory, preserving the guard pages on either side
     CHECK_EQ(MapBeg + PageSize,
              AddressRange.Map(MapBeg + PageSize, MapSize - 2 * PageSize));
-    const uptr Ptr = UserBeg - AlignedChunkHeaderSize;
+    const uptr Ptr = UserBeg - Chunk::getHeaderSize();
     ReservedAddressRange *StoredRange = getReservedAddressRange(Ptr);
     *StoredRange = AddressRange;
 
@@ -129,9 +129,9 @@ class ScudoLargeMmapAllocator {
   }
 
   static constexpr uptr AlignedReservedAddressRangeSize =
-      (sizeof(ReservedAddressRange) + MinAlignment - 1) & ~(MinAlignment - 1);
+      RoundUpTo(sizeof(ReservedAddressRange), MinAlignment);
   static constexpr uptr HeadersSize =
-      AlignedReservedAddressRangeSize + AlignedChunkHeaderSize;
+      AlignedReservedAddressRangeSize + Chunk::getHeaderSize();
 
   uptr PageSizeCached;
   SpinMutex StatsMutex;
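This last hunk is where the `constexpr RoundUpTo` pays off: both `static constexpr` members of the Secondary can now be computed at compile time with the shared helper instead of hand-written masking. A sketch with a stand-in `ReservedAddressRange` (the real type's fields and size are platform-dependent):

#include <cstdint>

using uptr = uintptr_t;
constexpr uptr MinAlignment = 16;
constexpr uptr HeaderSize = 16;  // stands in for Chunk::getHeaderSize()

constexpr uptr RoundUpTo(uptr Size, uptr Boundary) {
  return (Size + Boundary - 1) & ~(Boundary - 1);
}

struct ReservedAddressRange {  // stand-in; fields are hypothetical
  void *Base;
  uptr Size;
};

constexpr uptr AlignedReservedAddressRangeSize =
    RoundUpTo(sizeof(ReservedAddressRange), MinAlignment);
constexpr uptr HeadersSize = AlignedReservedAddressRangeSize + HeaderSize;

// Both values are usable in constant expressions:
static_assert(HeadersSize % MinAlignment == 0, "headers preserve alignment");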