upx/src/util/membuffer.cpp


/* membuffer.cpp --

   This file is part of the UPX executable compressor.

   Copyright (C) 1996-2023 Markus Franz Xaver Johannes Oberhumer
   Copyright (C) 1996-2023 Laszlo Molnar
   All Rights Reserved.

   UPX and the UCL library are free software; you can redistribute them
   and/or modify them under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of
   the License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.
   If not, write to the Free Software Foundation, Inc.,
   59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

   Markus F.X.J. Oberhumer              Laszlo Molnar
   <markus@oberhumer.com>               <ezerotven+github@gmail.com>
 */
#include "../conf.h"
#include "membuffer.h"
// extra functions to reduce dependency on membuffer.h
void *membuffer_get_void_ptr(MemBuffer &mb) { return mb.getVoidPtr(); }
unsigned membuffer_get_size(MemBuffer &mb) { return mb.getSize(); }
/*static*/ MemBuffer::Stats MemBuffer::stats;
#if DEBUG
#define debug_set(var, expr) (var) = (expr)
#else
#define debug_set(var, expr) /*empty*/
#endif
/*************************************************************************
// bool use_simple_mcheck()
**************************************************************************/
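// Note: the simple canary-based memory check below is compiled out under
// AddressSanitizer (which provides its own redzones) and, in WITH_VALGRIND
// builds, is disabled at runtime when the process is running under Valgrind.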
#if defined(__SANITIZE_ADDRESS__)

static forceinline constexpr bool use_simple_mcheck() { return false; }

#elif (WITH_VALGRIND) && defined(RUNNING_ON_VALGRIND)

static int use_simple_mcheck_flag = -1;

static noinline void use_simple_mcheck_init() {
    use_simple_mcheck_flag = 1;
    if (RUNNING_ON_VALGRIND) {
        use_simple_mcheck_flag = 0;
        // fprintf(stderr, "upx: detected RUNNING_ON_VALGRIND\n");
    }
}

static forceinline bool use_simple_mcheck() {
    if very_unlikely (use_simple_mcheck_flag < 0)
        use_simple_mcheck_init();
    return (bool) use_simple_mcheck_flag;
}

#else

static forceinline constexpr bool use_simple_mcheck() { return true; }

#endif
/*************************************************************************
//
**************************************************************************/
MemBuffer::MemBuffer(upx_uint64_t size_in_bytes) {
    alloc(size_in_bytes);
    debug_set(debug.last_return_address_alloc, upx_return_address());
}

MemBuffer::~MemBuffer() { this->dealloc(); }
// similar to BoundedPtr, except checks only at creation
// skip == offset, take == size_in_bytes
void *MemBuffer::subref_impl(const char *errfmt, size_t skip, size_t take) {
    debug_set(debug.last_return_address_subref, upx_return_address());
    // check overrun and wrap-around
    if (skip + take > b_size_in_bytes || skip + take < skip) {
        char buf[100];
        // printf uses unsigned formatting
        if (!errfmt || !errfmt[0])
            errfmt = "bad subref %#x %#x";
        snprintf(buf, sizeof(buf), errfmt, (unsigned) skip, (unsigned) take);
        throwCantPack(buf);
    }
    return &b[skip];
}
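// width(x): number of bits needed to represent x, i.e. 1 + floor(log2(x)) for x > 0.
// Examples: width(255) == 8, width(256) == 9; width(0) returns 1.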
static unsigned width(unsigned x) {
    unsigned w = 0;
    if ((~0u << 16) & x) {
        w += 16;
        x >>= 16;
    }
    if ((~0u << 8) & x) {
        w += 8;
        x >>= 8;
    }
    if ((~0u << 4) & x) {
        w += 4;
        x >>= 4;
    }
    if ((~0u << 2) & x) {
        w += 2;
        x >>= 2;
    }
    if ((~0u << 1) & x) {
        w += 1;
        // x >>= 1;
    }
    return 1 + w;
}
static inline unsigned umax(unsigned a, unsigned b) { return (a >= b) ? a : b; }
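// getSizeForCompression() returns a worst-case output buffer size over the supported
// compression methods. For example, uncompressed_size == 1024: the all-literal bound
// 1024 + 1024/8 == 1152 dominates the NRV2B/NRV2E and zstd bounds, and the final
// mem_size(1, 1152, 0, 256) adds the 256 safety bytes, giving 1408 (see the
// TEST_CASE at the bottom of this file).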
unsigned MemBuffer::getSizeForCompression(unsigned uncompressed_size, unsigned extra) {
    if (uncompressed_size == 0)
        throwCantPack("invalid uncompressed_size");
    const unsigned z = uncompressed_size;     // fewer keystrokes and display columns
    const unsigned w = umax(8, width(z - 1)); // ignore tiny offsets
    unsigned bytes = ACC_ICONV(unsigned, mem_size(1, z)); // check
    // Worst matching: All match at max_offset, which implies 3==min_match
    // All literal: 1 bit overhead per literal byte
    bytes = umax(bytes, z + z / 8);
    // NRV2B: 1 byte plus 2 bits per width exceeding 8 ("ss11")
    bytes = umax(bytes, (z / 3 * (8 + 2 * (w - 8) / 1)) / 8);
    // NRV2E: 1 byte plus 3 bits per pair of width exceeding 7 ("ss12")
    bytes = umax(bytes, (z / 3 * (8 + 3 * (w - 7) / 2)) / 8);
    // zstd: ZSTD_COMPRESSBOUND
    bytes = umax(bytes, z + (z >> 8) + ((z < (128 << 10)) ? (((128 << 10) - z) >> 11) : 0));
    // extra + 256 safety for rounding
    bytes = mem_size(1, bytes, extra, 256);
    UNUSED(w);
    return bytes;
}
unsigned MemBuffer::getSizeForDecompression(unsigned uncompressed_size, unsigned extra) {
    if (uncompressed_size == 0)
        throwCantPack("invalid uncompressed_size");
    size_t bytes = mem_size(1, uncompressed_size, extra); // check
    return ACC_ICONV(unsigned, bytes);
}
void MemBuffer::allocForCompression(unsigned uncompressed_size, unsigned extra) {
    if (uncompressed_size == 0)
        throwCantPack("invalid uncompressed_size");
    unsigned size = getSizeForCompression(uncompressed_size, extra);
    alloc(size);
    debug_set(debug.last_return_address_alloc, upx_return_address());
}

void MemBuffer::allocForDecompression(unsigned uncompressed_size, unsigned extra) {
    if (uncompressed_size == 0)
        throwCantPack("invalid uncompressed_size");
    unsigned size = getSizeForDecompression(uncompressed_size, extra);
    alloc(size);
    debug_set(debug.last_return_address_alloc, upx_return_address());
}
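// Illustrative usage sketch (not part of this file): a packer typically allocates the
// output buffer with the worst-case bound before compressing, e.g.
//   MemBuffer obuf;
//   obuf.allocForCompression(u_len);   // worst-case size for u_len input bytes
//   // ... compress up to u_len bytes into raw_bytes(obuf, obuf.getSize()) ...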
void MemBuffer::fill(unsigned off, unsigned len, int value) {
    debug_set(debug.last_return_address_fill, upx_return_address());
    checkState();
    assert((int) off >= 0);
    assert((int) len >= 0);
    assert(off <= b_size_in_bytes);
    assert(len <= b_size_in_bytes);
    assert(off + len <= b_size_in_bytes);
    if (len > 0)
        memset(b + off, value, len);
}
/*************************************************************************
//
**************************************************************************/
#define PTR_BITS(p) ((unsigned) ((upx_uintptr_t) (p) & 0xffffffff))
#define MAGIC1(p) ((PTR_BITS(p) ^ 0xfefdbeeb) | 1)
#define MAGIC2(p) ((PTR_BITS(p) ^ 0xfefdbeeb ^ 0x80024011) | 1)
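// With use_simple_mcheck(), alloc() reserves 32 extra bytes and hands out b = p + 16,
// so the heap block looks like:
//   [8 bytes unused][b_size_in_bytes][MAGIC1(b)][user data ...][MAGIC2(b)][alloc counter]
//    p               b-8              b-4        b .. b+size    b+size     b+size+4
// checkState() verifies the size word and both magic words to detect under- and overruns.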
void MemBuffer::checkState() const {
    if (!b)
        throwInternalError("block not allocated");
    if (use_simple_mcheck()) {
        if (get_ne32(b - 4) != MAGIC1(b))
            throwInternalError("memory clobbered before allocated block 1");
        if (get_ne32(b - 8) != b_size_in_bytes)
            throwInternalError("memory clobbered before allocated block 2");
        if (get_ne32(b + b_size_in_bytes) != MAGIC2(b))
            throwInternalError("memory clobbered past end of allocated block");
    }
}
void MemBuffer::alloc(upx_uint64_t size) {
    // NOTE: we don't automatically free a used buffer
    assert(b == nullptr);
    assert(b_size_in_bytes == 0);
    //
    assert(size > 0);
    debug_set(debug.last_return_address_alloc, upx_return_address());
    size_t bytes = mem_size(1, size, use_simple_mcheck() ? 32 : 0);
    unsigned char *p = (unsigned char *) malloc(bytes);
    NO_printf("MemBuffer::alloc %llu: %p\n", size, p);
    if (!p)
        throwOutOfMemoryException();
    b = p;
    b_size_in_bytes = ACC_ICONV(unsigned, size);
    if (use_simple_mcheck()) {
        b = p + 16;
        // store magic constants to detect buffer overruns
        set_ne32(b - 8, b_size_in_bytes);
        set_ne32(b - 4, MAGIC1(b));
        set_ne32(b + b_size_in_bytes, MAGIC2(b));
        set_ne32(b + b_size_in_bytes + 4, stats.global_alloc_counter);
    }
#if !defined(__SANITIZE_ADDRESS__) && 0
    fill(0, b_size_in_bytes, (rand() & 0xff) | 1); // debug
    (void) VALGRIND_MAKE_MEM_UNDEFINED(b, b_size_in_bytes);
#endif
    stats.global_alloc_counter += 1;
    stats.global_total_bytes += b_size_in_bytes;
    stats.global_total_active_bytes += b_size_in_bytes;
}
void MemBuffer::dealloc() {
    if (b != nullptr) {
        debug_set(debug.last_return_address_dealloc, upx_return_address());
        checkState();
        stats.global_total_active_bytes -= b_size_in_bytes;
        if (use_simple_mcheck()) {
            // clear magic constants
            set_ne32(b - 8, 0);
            set_ne32(b - 4, 0);
            set_ne32(b + b_size_in_bytes, 0);
            set_ne32(b + b_size_in_bytes + 4, 0);
            //
            ::free(b - 16);
        } else
            ::free(b);
        b = nullptr;
        b_size_in_bytes = 0;
    } else {
        assert(b_size_in_bytes == 0);
    }
}
/*************************************************************************
//
**************************************************************************/
TEST_CASE("MemBuffer") {
MemBuffer mb;
CHECK_THROWS(mb.checkState());
CHECK_THROWS(mb.alloc(0x30000000 + 1));
CHECK(raw_bytes(mb, 0) == nullptr);
CHECK_THROWS(raw_bytes(mb, 1));
mb.alloc(64);
mb.checkState();
CHECK(raw_bytes(mb, 64) != nullptr);
CHECK(raw_bytes(mb, 64) == mb.getVoidPtr());
CHECK_THROWS(raw_bytes(mb, 65));
2023-01-25 04:52:10 +08:00
CHECK_NOTHROW(mb + 64);
CHECK_NOTHROW(64 + mb);
CHECK_THROWS(mb + 65);
CHECK_THROWS(65 + mb);
2022-09-15 07:14:38 +08:00
if (use_simple_mcheck()) {
upx_byte *b = raw_bytes(mb, 0);
unsigned magic1 = get_ne32(b - 4);
set_ne32(b - 4, magic1 ^ 1);
CHECK_THROWS(mb.checkState());
set_ne32(b - 4, magic1);
mb.checkState();
}
}
2023-01-25 04:52:10 +08:00
TEST_CASE("MemBuffer::getSizeForCompression") {
CHECK_THROWS(MemBuffer::getSizeForCompression(0));
CHECK_THROWS(MemBuffer::getSizeForDecompression(0));
CHECK(MemBuffer::getSizeForCompression(1) == 320);
CHECK(MemBuffer::getSizeForCompression(256) == 576);
CHECK(MemBuffer::getSizeForCompression(1024) == 1408);
// CHECK(MemBuffer::getSizeForCompression(1024 * 1024) == 0); // TODO
// CHECK(MemBuffer::getSizeForCompression(UPX_RSIZE_MAX) == 0); // TODO
}
/* vim:set ts=4 sw=4 et: */