MSan divides the virtual address space into APP, INVALID, SHADOW and ORIGIN memory. The allocator usually just steals a bit of the APP address space: typically the bottom portion of the PIE binaries section, which works because the Linux kernel maps from the top of the PIE binaries section. However, if ASLR is very aggressive, the binary may end up mapped in the same location where the allocator wants to live, which results in a segfault.

This patch adds a MappingDesc::ALLOCATOR type and enforces that the memory range for the allocator is not occupied by anything else. Since the allocator range information is not readily available in msan.h, we duplicate the information from msan_allocator.cpp.

Note: aggressive ASLR can also lead to a different type of failure, where the PIE binaries/libraries are mapped entirely outside of the APP/ALLOCATOR sections; that will be addressed in a separate patch (https://github.com/llvm/llvm-project/pull/85142).
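As an illustration of the layout change described above, here is a minimal sketch of how an ALLOCATOR entry might look in a platform's kMemoryLayout table in msan.h. The addresses and the neighbouring entry are hypothetical; the real range is whatever mirrors the allocator constants in msan_allocator.cpp for that platform.

// Hypothetical excerpt of a per-platform kMemoryLayout table (msan.h).
// The ALLOCATOR entry carves the allocator's reserved range out of what was
// previously plain APP space, so InitShadow() below can verify that nothing
// else (e.g. an ASLR-placed binary) already occupies it.
static const MappingDesc kMemoryLayout[] = {
    // ... SHADOW / ORIGIN / INVALID / other APP entries elided ...
    {0x700000000000ULL, 0x740000000000ULL, MappingDesc::ALLOCATOR, "allocator"},
    {0x740000000000ULL, 0x800000000000ULL, MappingDesc::APP, "app-3"},
};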
//===-- msan_linux.cpp ----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// Linux-, NetBSD- and FreeBSD-specific code.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
|
|
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
|
|
|
|
# include <elf.h>
|
|
# include <link.h>
|
|
# include <pthread.h>
|
|
# include <signal.h>
|
|
# include <stdio.h>
|
|
# include <stdlib.h>
|
|
# include <sys/resource.h>
|
|
# include <sys/time.h>
|
|
# include <unistd.h>
|
|
# include <unwind.h>
|
|
|
|
# include "msan.h"
|
|
# include "msan_allocator.h"
|
|
# include "msan_chained_origin_depot.h"
|
|
# include "msan_report.h"
|
|
# include "msan_thread.h"
|
|
# include "sanitizer_common/sanitizer_common.h"
|
|
# include "sanitizer_common/sanitizer_procmaps.h"
|
|
# include "sanitizer_common/sanitizer_stackdepot.h"
|
|
|
|

namespace __msan {

void ReportMapRange(const char *descr, uptr beg, uptr size) {
  if (size > 0) {
    uptr end = beg + size - 1;
    VPrintf(1, "%s : 0x%zx - 0x%zx\n", descr, beg, end);
  }
}

static bool CheckMemoryRangeAvailability(uptr beg, uptr size) {
  if (size > 0) {
    uptr end = beg + size - 1;
    if (!MemoryRangeIsAvailable(beg, end)) {
      Printf("FATAL: Memory range 0x%zx - 0x%zx is not available.\n", beg, end);
      return false;
    }
  }
  return true;
}

static bool ProtectMemoryRange(uptr beg, uptr size, const char *name) {
  if (size > 0) {
    void *addr = MmapFixedNoAccess(beg, size, name);
    if (beg == 0 && addr) {
      // Depending on the kernel configuration, we may not be able to protect
      // the page at address zero.
      uptr gap = 16 * GetPageSizeCached();
      beg += gap;
      size -= gap;
      addr = MmapFixedNoAccess(beg, size, name);
    }
    if ((uptr)addr != beg) {
      uptr end = beg + size - 1;
      Printf("FATAL: Cannot protect memory range 0x%zx - 0x%zx (%s).\n", beg,
             end, name);
      return false;
    }
  }
  return true;
}

static void CheckMemoryLayoutSanity() {
  uptr prev_end = 0;
  for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
    uptr start = kMemoryLayout[i].start;
    uptr end = kMemoryLayout[i].end;
    MappingDesc::Type type = kMemoryLayout[i].type;
    CHECK_LT(start, end);
    CHECK_EQ(prev_end, start);
    CHECK(addr_is_type(start, type));
    CHECK(addr_is_type((start + end) / 2, type));
    CHECK(addr_is_type(end - 1, type));
    if (type == MappingDesc::APP || type == MappingDesc::ALLOCATOR) {
      uptr addr = start;
      CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
      CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
      CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));

      addr = (start + end) / 2;
      CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
      CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
      CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));

      addr = end - 1;
      CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
      CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
      CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
    }
    prev_end = end;
  }
}

bool InitShadow(bool init_origins) {
  // Let user know mapping parameters first.
  VPrintf(1, "__msan_init %p\n", reinterpret_cast<void *>(&__msan_init));
  for (unsigned i = 0; i < kMemoryLayoutSize; ++i)
    VPrintf(1, "%s: %zx - %zx\n", kMemoryLayout[i].name, kMemoryLayout[i].start,
            kMemoryLayout[i].end - 1);

  CheckMemoryLayoutSanity();

  if (!MEM_IS_APP(&__msan_init)) {
    Printf("FATAL: Code %p is out of application range. Non-PIE build?\n",
           reinterpret_cast<void *>(&__msan_init));
    return false;
  }

  const uptr maxVirtualAddress = GetMaxUserVirtualAddress();

  for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
    uptr start = kMemoryLayout[i].start;
    uptr end = kMemoryLayout[i].end;
    uptr size = end - start;
    MappingDesc::Type type = kMemoryLayout[i].type;

    // Check if the segment should be mapped based on platform constraints.
    if (start >= maxVirtualAddress)
      continue;

    bool map = type == MappingDesc::SHADOW ||
               (init_origins && type == MappingDesc::ORIGIN);
    bool protect = type == MappingDesc::INVALID ||
                   (!init_origins && type == MappingDesc::ORIGIN);
    CHECK(!(map && protect));
    if (!map && !protect) {
      CHECK(type == MappingDesc::APP || type == MappingDesc::ALLOCATOR);

      // The allocator range must not already be occupied (e.g., by a binary
      // placed there under aggressive ASLR); otherwise the allocator would
      // fail to claim it later.
      if (type == MappingDesc::ALLOCATOR &&
          !CheckMemoryRangeAvailability(start, size))
        return false;
    }
    if (map) {
      if (!CheckMemoryRangeAvailability(start, size))
        return false;
      if (!MmapFixedSuperNoReserve(start, size, kMemoryLayout[i].name))
        return false;
      if (common_flags()->use_madv_dontdump)
        DontDumpShadowMemory(start, size);
    }
    if (protect) {
      if (!CheckMemoryRangeAvailability(start, size))
        return false;
      if (!ProtectMemoryRange(start, size, kMemoryLayout[i].name))
        return false;
    }
  }

  return true;
}

static void MsanAtExit(void) {
  if (flags()->print_stats && (flags()->atexit || msan_report_count > 0))
    ReportStats();
  if (msan_report_count > 0) {
    ReportAtExitStatistics();
    if (common_flags()->exitcode)
      internal__exit(common_flags()->exitcode);
  }
}

void InstallAtExitHandler() {
  atexit(MsanAtExit);
}

// ---------------------- TSD ---------------- {{{1

#if SANITIZER_NETBSD
// Thread Static Data cannot be used in early init on NetBSD.
// Reuse the MSan TSD API for compatibility with existing code
// with an alternative implementation.

static void (*tsd_destructor)(void *tsd) = nullptr;

struct tsd_key {
  tsd_key() : key(nullptr) {}
  ~tsd_key() {
    CHECK(tsd_destructor);
    if (key)
      (*tsd_destructor)(key);
  }
  MsanThread *key;
};

static thread_local struct tsd_key key;

void MsanTSDInit(void (*destructor)(void *tsd)) {
  CHECK(!tsd_destructor);
  tsd_destructor = destructor;
}

MsanThread *GetCurrentThread() {
  CHECK(tsd_destructor);
  return key.key;
}

void SetCurrentThread(MsanThread *tsd) {
  CHECK(tsd_destructor);
  CHECK(tsd);
  CHECK(!key.key);
  key.key = tsd;
}

void MsanTSDDtor(void *tsd) {
  CHECK(tsd_destructor);
  CHECK_EQ(key.key, tsd);
  key.key = nullptr;
  // Make sure that signal handler can not see a stale current thread pointer.
  atomic_signal_fence(memory_order_seq_cst);
  MsanThread::TSDDtor(tsd);
}
#else
static pthread_key_t tsd_key;
static bool tsd_key_inited = false;

void MsanTSDInit(void (*destructor)(void *tsd)) {
  CHECK(!tsd_key_inited);
  tsd_key_inited = true;
  CHECK_EQ(0, pthread_key_create(&tsd_key, destructor));
}

static THREADLOCAL MsanThread* msan_current_thread;

MsanThread *GetCurrentThread() {
  return msan_current_thread;
}

void SetCurrentThread(MsanThread *t) {
  // Make sure we do not reset the current MsanThread.
  CHECK_EQ(0, msan_current_thread);
  msan_current_thread = t;
  // Make sure that MsanTSDDtor gets called at the end.
  CHECK(tsd_key_inited);
  pthread_setspecific(tsd_key, (void *)t);
}

void MsanTSDDtor(void *tsd) {
  MsanThread *t = (MsanThread*)tsd;
  if (t->destructor_iterations_ > 1) {
    t->destructor_iterations_--;
    CHECK_EQ(0, pthread_setspecific(tsd_key, tsd));
    return;
  }
  msan_current_thread = nullptr;
  // Make sure that signal handler can not see a stale current thread pointer.
  atomic_signal_fence(memory_order_seq_cst);
  MsanThread::TSDDtor(tsd);
}
# endif

static void BeforeFork() {
  // Usually we lock ThreadRegistry, but msan does not have one.
  LockAllocator();
  StackDepotLockBeforeFork();
  ChainedOriginDepotBeforeFork();
}

static void AfterFork(bool fork_child) {
  ChainedOriginDepotAfterFork(fork_child);
  StackDepotUnlockAfterFork(fork_child);
  UnlockAllocator();
  // Usually we unlock ThreadRegistry, but msan does not have one.
}

void InstallAtForkHandler() {
  pthread_atfork(
      &BeforeFork, []() { AfterFork(/* fork_child= */ false); },
      []() { AfterFork(/* fork_child= */ true); });
}

}  // namespace __msan

#endif  // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD