/* mem.cpp --
This file is part of the UPX executable compressor.
2022-08-20 06:47:00 +08:00
Copyright (C) 1996-2022 Markus Franz Xaver Johannes Oberhumer
Copyright (C) 1996-2022 Laszlo Molnar
All Rights Reserved.
2000-05-20 00:04:55 +08:00
UPX and the UCL library are free software; you can redistribute them
and/or modify them under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of
the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING.
If not, write to the Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
Markus F.X.J. Oberhumer Laszlo Molnar
2016-09-28 18:25:01 +08:00
<markus@oberhumer.com> <ezerotven+github@gmail.com>
2000-05-20 00:04:55 +08:00
*/
#include "conf.h"
#include "mem.h"
/*************************************************************************
2016-10-06 18:31:03 +08:00
// bool use_simple_mcheck()
2000-05-20 00:04:55 +08:00
**************************************************************************/
#if defined(__SANITIZE_ADDRESS__)
// AddressSanitizer supplies its own redzone checking - our canary words
// would only get flagged as out-of-bounds accesses, so turn them off.
__acc_static_forceinline constexpr bool use_simple_mcheck() { return false; }
#elif (WITH_VALGRIND) && defined(RUNNING_ON_VALGRIND)
// Decide at runtime: valgrind tracks heap bounds itself, so the canary
// words are redundant (and touching bytes outside the logical buffer
// would trigger valgrind errors).  -1 = not probed yet, 0 = valgrind, 1 = native.
static int use_simple_mcheck_flag = -1;
__acc_static_noinline void use_simple_mcheck_init()
{
    use_simple_mcheck_flag = RUNNING_ON_VALGRIND ? 0 : 1;
    //if (use_simple_mcheck_flag == 0)
    //    fprintf(stderr, "upx: detected RUNNING_ON_VALGRIND\n");
}
__acc_static_forceinline bool use_simple_mcheck()
{
    // lazy one-time probe; the init call is kept out-of-line on purpose
    if __acc_unlikely(use_simple_mcheck_flag < 0)
        use_simple_mcheck_init();
    return (bool) use_simple_mcheck_flag;
}
#else
// Default build: enable the home-grown canary words around each allocation.
__acc_static_forceinline constexpr bool use_simple_mcheck() { return true; }
#endif
/*************************************************************************
//
**************************************************************************/
MemBuffer::MemBuffer(upx_uint64_t size) : b(nullptr), b_size(0)
{
    // Delegate to alloc(), which validates size and installs the canaries.
    alloc(size);
}
MemBuffer::~MemBuffer()
{
    // Release the backing store; dealloc() is a no-op when b is nullptr.
    dealloc();
}
// Return a pointer to the window [skip, skip+take) inside the buffer.
// Similar to BoundedPtr, except the bounds are checked only at creation.
// On a bad window, errfmt is formatted with (skip, take) and we throw.
unsigned char *MemBuffer::subref(char const *errfmt, unsigned skip, unsigned take)
{
    unsigned const end = skip + take;
    // end < take catches unsigned wrap-around of skip+take;
    // end > b_size catches a window running past the buffer.
    if (end < take || end > b_size) {
        char buf[100];
        snprintf(buf, sizeof(buf), errfmt, skip, take);
        throwCantPack(buf);
    }
    return &b[skip];
}
void MemBuffer::dealloc()
2000-05-20 00:04:55 +08:00
{
2020-12-08 12:40:17 +08:00
if (b != nullptr)
{
checkState();
2016-10-06 18:31:03 +08:00
if (use_simple_mcheck())
{
// remove magic constants
set_be32(b - 8, 0);
set_be32(b - 4, 0);
set_be32(b + b_size, 0);
set_be32(b + b_size + 4, 0);
//
::free(b - 16);
}
else
::free(b);
2020-12-08 12:40:17 +08:00
b = nullptr;
b_size = 0;
}
else
assert(b_size == 0);
2000-05-20 00:04:55 +08:00
}
// Number of significant bits in x (position of the highest set bit, 1-based).
// Returns 1 for x == 0.
static unsigned width(unsigned x)
{
    unsigned bits = 1;
    for (unsigned shift = 16; shift != 0; shift >>= 1) {
        if (x >> shift) {
            bits += shift;
            x >>= shift;
        }
    }
    return bits;
}
// Maximum of two unsigned values.
static unsigned umax(unsigned a, unsigned b)
{
    return (a < b) ? b : a;
}
unsigned MemBuffer::getSizeForCompression(unsigned uncompressed_size, unsigned extra)
2000-05-20 00:04:55 +08:00
{
size_t const z = uncompressed_size; // fewer keystrokes and display columns
size_t bytes = mem_size(1, z, extra);
size_t const w = umax(8, width(z -1)); // ignore tiny offsets
bytes = 256 + // safety?
umax(bytes + z/8, // All literal: 1 bit overhead per literal byte
// Worst matching: All match at max_offset, which implies 3==min_match
// NRV2B: 1 byte plus 2 bits per width exceeding 8 ("ss11")
umax((z/3 * (8+ 2*(w - 8)/1))/8,
// NRV2E: 1 byte plus 3 bits per pair of width exceeding 7 ("ss12")
(z/3 * (8+ 3*(w - 7)/2))/8 ) );
return ACC_ICONV(unsigned, bytes);
2000-05-20 00:04:55 +08:00
}
unsigned MemBuffer::getSizeForUncompression(unsigned uncompressed_size, unsigned extra)
2000-05-20 00:04:55 +08:00
{
size_t bytes = mem_size(1, uncompressed_size, extra);
// INFO: 3 bytes are the allowed overrun for the i386 asm_fast decompressors
#if (ACC_ARCH_I386)
bytes += 3;
#endif
return ACC_ICONV(unsigned, bytes);
}
// Allocate a buffer big enough for worst-case compression output.
void MemBuffer::allocForCompression(unsigned uncompressed_size, unsigned extra)
{
    alloc(getSizeForCompression(uncompressed_size, extra));
}
// Allocate a buffer big enough for decompression output.
void MemBuffer::allocForUncompression(unsigned uncompressed_size, unsigned extra)
{
    alloc(getSizeForUncompression(uncompressed_size, extra));
}
// memset the range [off, off+len) of the buffer; bounds are asserted
// in debug builds.
void MemBuffer::fill(unsigned off, unsigned len, int value)
{
    checkState();
    // the (int) casts reject values with the high bit set
    assert((int)off >= 0);
    assert((int)len >= 0);
    assert(off <= b_size);
    assert(len <= b_size);
    assert(off + len <= b_size);
    if (len != 0)
        memset(b + off, value, len);
}
/*************************************************************************
//
**************************************************************************/
2000-05-20 00:04:55 +08:00
#define PTR(p) ((unsigned) ((upx_uintptr_t)(p) & 0xffffffff))
#define MAGIC1(p) (PTR(p) ^ 0xfefdbeeb)
#define MAGIC2(p) (PTR(p) ^ 0xfefdbeeb ^ 0x80024001)
2000-05-20 00:04:55 +08:00
unsigned MemBuffer::global_alloc_counter = 0;
void MemBuffer::checkState() const
2000-05-20 00:04:55 +08:00
{
if (!b)
throwInternalError("block not allocated");
2016-10-06 18:31:03 +08:00
if (use_simple_mcheck())
{
if (get_be32(b - 4) != MAGIC1(b))
throwInternalError("memory clobbered before allocated block 1");
if (get_be32(b - 8) != b_size)
throwInternalError("memory clobbered before allocated block 2");
if (get_be32(b + b_size) != MAGIC2(b))
throwInternalError("memory clobbered past end of allocated block");
}
assert((int)b_size > 0);
2000-05-20 00:04:55 +08:00
}
// Allocate size bytes; with simple mcheck enabled, reserve extra room and
// surround the payload with magic canary words checked by checkState().
void MemBuffer::alloc(upx_uint64_t size)
{
    // NOTE: we don't automatically free a used buffer
    assert(b == nullptr);
    assert(b_size == 0);
    assert(size > 0);

    // 32 extra bytes leave room for the 16-byte header and trailing canaries
    size_t const bytes = mem_size(1, size, use_simple_mcheck() ? 32 : 0); // may throw
    unsigned char *p = (unsigned char *) malloc(bytes);
    if (!p)
        throwOutOfMemoryException();
    b_size = ACC_ICONV(unsigned, size);
    if (!use_simple_mcheck()) {
        b = p;
    } else {
        b = p + 16; // payload starts 16 bytes into the raw allocation
        // store magic constants to detect buffer overruns
        set_be32(b - 8, b_size);
        set_be32(b - 4, MAGIC1(b));
        set_be32(b + b_size, MAGIC2(b));
        set_be32(b + b_size + 4, global_alloc_counter++);
    }
    //fill(0, b_size, (rand() & 0xff) | 1); // debug
}
/* vim:set ts=4 sw=4 et: */