feature usm: reserve heap extended in 57 bit address space

use mocked mmap function in unit tests

Related-To: NEO-7665
Signed-off-by: Mateusz Jablonski <mateusz.jablonski@intel.com>
Authored by Mateusz Jablonski on 2023-03-22 14:32:27 +00:00
Committed by Compute-Runtime-Automation
parent ca02bbba4b
commit 7e5e27f0b9
12 changed files with 122 additions and 84 deletions
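
For reference, the address-range boundaries used throughout the hunks below are plain bit arithmetic. A minimal standalone sketch (not part of the commit) that assumes the usual maxNBitValue(n) == (1ull << n) - 1 helper and checks the hex values quoted in the code comments:

#include <cstdint>

// Illustration only: stands in for the driver's maxNBitValue helper.
constexpr uint64_t maxNBitValue(uint64_t n) { return (1ull << n) - 1; }

static_assert(maxNBitValue(47) + 1 == 0x800000000000ull, "base of the high half of the 48-bit space");
static_assert(maxNBitValue(48) == 0xFFFFFFFFFFFFull, "top of the 48-bit space");
static_assert(maxNBitValue(48) + 1 == 0x1000000000000ull, "base of the 57-bit-only area");
static_assert(maxNBitValue(57) == 0x1FFFFFFFFFFFFFFull, "top of the 57-bit space");
static_assert(1024ull * (1ull << 30) == (1ull << 40), "1024 GB == 1 TB reservation size");

int main() { return 0; }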

View File

@@ -47,17 +47,14 @@ static void reserveLow48BitRangeWithRetry(OSMemory *osMemory, OSMemory::Reserved
};
}
static void reserveHigh48BitRangeWithMemoryMapsParse(OSMemory *osMemory, OSMemory::ReservedCpuAddressRange &reservedCpuAddressRange) {
constexpr uint64_t high48BitAreaBase = maxNBitValue(47) + 1; // 0x800000000000
constexpr uint64_t high48BitAreaTop = maxNBitValue(48); // 0xFFFFFFFFFFFF
uint64_t reservationSize = 1024 * MemoryConstants::gigaByte; // 1 TB
uint64_t reservationBase = high48BitAreaBase;
static void reserveRangeWithMemoryMapsParse(OSMemory *osMemory, OSMemory::ReservedCpuAddressRange &reservedCpuAddressRange, uint64_t areaBase, uint64_t areaTop, uint64_t reservationSize) {
uint64_t reservationBase = areaBase;
reservedCpuAddressRange = osMemory->reserveCpuAddressRange(reinterpret_cast<void *>(reservationBase), static_cast<size_t>(reservationSize), MemoryConstants::pageSize64k);
if (reservedCpuAddressRange.alignedPtr != nullptr) {
uint64_t alignedPtrU64 = castToUint64(reservedCpuAddressRange.alignedPtr);
if (alignedPtrU64 >= high48BitAreaBase && alignedPtrU64 + reservationSize < high48BitAreaTop) {
if (alignedPtrU64 >= areaBase && alignedPtrU64 + reservationSize < areaTop) {
return;
} else {
osMemory->releaseCpuAddressRange(reservedCpuAddressRange);
@@ -68,8 +65,8 @@ static void reserveHigh48BitRangeWithMemoryMapsParse(OSMemory *osMemory, OSMemor
OSMemory::MemoryMaps memoryMaps;
osMemory->getMemoryMaps(memoryMaps);
for (size_t i = 0; reservationBase < high48BitAreaTop && i < memoryMaps.size(); ++i) {
if (memoryMaps[i].end < high48BitAreaBase) {
for (size_t i = 0; reservationBase < areaTop && i < memoryMaps.size(); ++i) {
if (memoryMaps[i].end < areaBase) {
continue;
}
@@ -79,16 +76,30 @@ static void reserveHigh48BitRangeWithMemoryMapsParse(OSMemory *osMemory, OSMemor
reservationBase = memoryMaps[i].end;
}
if (reservationBase + reservationSize < high48BitAreaTop) {
if (reservationBase + reservationSize < areaTop) {
reservedCpuAddressRange = osMemory->reserveCpuAddressRange(reinterpret_cast<void *>(reservationBase), static_cast<size_t>(reservationSize), MemoryConstants::pageSize64k);
}
}
GfxPartition::GfxPartition(OSMemory::ReservedCpuAddressRange &sharedReservedCpuAddressRange) : reservedCpuAddressRange(sharedReservedCpuAddressRange), osMemory(OSMemory::create()) {}
static void reserveHigh48BitRangeWithMemoryMapsParse(OSMemory *osMemory, OSMemory::ReservedCpuAddressRange &reservedCpuAddressRange) {
constexpr uint64_t high48BitAreaBase = maxNBitValue(47) + 1; // 0x800000000000
constexpr uint64_t high48BitAreaTop = maxNBitValue(48); // 0xFFFFFFFFFFFF
uint64_t reservationSize = 1024 * MemoryConstants::gigaByte; // 1 TB
reserveRangeWithMemoryMapsParse(osMemory, reservedCpuAddressRange, high48BitAreaBase, high48BitAreaTop, reservationSize);
}
static void reserve57BitRangeWithMemoryMapsParse(OSMemory *osMemory, OSMemory::ReservedCpuAddressRange &reservedCpuAddressRange, uint64_t reservationSize) {
constexpr uint64_t areaBase = maxNBitValue(48) + 1;
constexpr uint64_t areaTop = maxNBitValue(57);
reserveRangeWithMemoryMapsParse(osMemory, reservedCpuAddressRange, areaBase, areaTop, reservationSize);
}
GfxPartition::GfxPartition(OSMemory::ReservedCpuAddressRange &reservedCpuAddressRangeForHeapSvm) : reservedCpuAddressRangeForHeapSvm(reservedCpuAddressRangeForHeapSvm), osMemory(OSMemory::create()) {}
GfxPartition::~GfxPartition() {
osMemory->releaseCpuAddressRange(reservedCpuAddressRange);
reservedCpuAddressRange = {0};
osMemory->releaseCpuAddressRange(reservedCpuAddressRangeForHeapSvm);
reservedCpuAddressRangeForHeapSvm = {};
osMemory->releaseCpuAddressRange(reservedCpuAddressRangeForHeapExtended);
}
void GfxPartition::Heap::init(uint64_t base, uint64_t size, size_t allocationAlignment) {
@@ -240,19 +251,19 @@ bool GfxPartition::init(uint64_t gpuAddressSpace, size_t cpuAddressRangeSizeToRe
gfxBase = maxNBitValue(48 - 1) + 1;
heapInit(HeapIndex::HEAP_SVM, 0ull, gfxBase);
} else if (gpuAddressSpace == maxNBitValue(47)) {
if (reservedCpuAddressRange.alignedPtr == nullptr) {
if (reservedCpuAddressRangeForHeapSvm.alignedPtr == nullptr) {
if (cpuAddressRangeSizeToReserve == 0) {
return false;
}
reservedCpuAddressRange = osMemory->reserveCpuAddressRange(cpuAddressRangeSizeToReserve, GfxPartition::heapGranularity);
if (reservedCpuAddressRange.originalPtr == nullptr) {
reservedCpuAddressRangeForHeapSvm = osMemory->reserveCpuAddressRange(cpuAddressRangeSizeToReserve, GfxPartition::heapGranularity);
if (reservedCpuAddressRangeForHeapSvm.originalPtr == nullptr) {
return false;
}
if (!isAligned<GfxPartition::heapGranularity>(reservedCpuAddressRange.alignedPtr)) {
if (!isAligned<GfxPartition::heapGranularity>(reservedCpuAddressRangeForHeapSvm.alignedPtr)) {
return false;
}
}
gfxBase = reinterpret_cast<uint64_t>(reservedCpuAddressRange.alignedPtr);
gfxBase = reinterpret_cast<uint64_t>(reservedCpuAddressRangeForHeapSvm.alignedPtr);
gfxTop = gfxBase + cpuAddressRangeSizeToReserve;
heapInit(HeapIndex::HEAP_SVM, 0ull, gpuAddressSpace + 1);
} else if (gpuAddressSpace < maxNBitValue(47)) {
@@ -332,27 +343,37 @@ bool GfxPartition::initAdditionalRange(uint32_t cpuVirtualAddressSize, uint64_t
return false;
}
bool isExtendedHeapInitialized = false;
if (cpuVirtualAddressSize == 57 && CpuInfo::getInstance().isCpuFlagPresent("la57")) {
// Always reserve 48 bit window on 57 bit CPU
if (reservedCpuAddressRange.alignedPtr == nullptr) {
reserveHigh48BitRangeWithMemoryMapsParse(osMemory.get(), reservedCpuAddressRange);
if (reservedCpuAddressRangeForHeapSvm.alignedPtr == nullptr) {
reserveHigh48BitRangeWithMemoryMapsParse(osMemory.get(), reservedCpuAddressRangeForHeapSvm);
if (reservedCpuAddressRange.alignedPtr == nullptr) {
reserveLow48BitRangeWithRetry(osMemory.get(), reservedCpuAddressRange);
if (reservedCpuAddressRangeForHeapSvm.alignedPtr == nullptr) {
reserveLow48BitRangeWithRetry(osMemory.get(), reservedCpuAddressRangeForHeapSvm);
}
if (reservedCpuAddressRange.alignedPtr == nullptr) {
if (reservedCpuAddressRangeForHeapSvm.alignedPtr == nullptr) {
return false;
}
}
gfxBase = castToUint64(reservedCpuAddressRange.alignedPtr);
gfxTop = gfxBase + reservedCpuAddressRange.sizeToReserve;
gfxBase = castToUint64(reservedCpuAddressRangeForHeapSvm.alignedPtr);
gfxTop = gfxBase + reservedCpuAddressRangeForHeapSvm.sizeToReserve;
if (gpuAddressSpace == maxNBitValue(57)) {
heapInit(HeapIndex::HEAP_SVM, 0ull, maxNBitValue(57 - 1) + 1);
} else {
heapInit(HeapIndex::HEAP_SVM, 0ull, maxNBitValue(48) + 1);
}
if (gpuAddressSpace == maxNBitValue(57)) {
uint64_t heapExtendedSize = 1024 * MemoryConstants::gigaByte; // 1 TB
reserve57BitRangeWithMemoryMapsParse(osMemory.get(), reservedCpuAddressRangeForHeapExtended, heapExtendedSize);
if (reservedCpuAddressRangeForHeapExtended.alignedPtr) {
heapInit(HeapIndex::HEAP_EXTENDED, castToUint64(reservedCpuAddressRangeForHeapExtended.alignedPtr), heapExtendedSize);
isExtendedHeapInitialized = true;
}
}
} else {
// On 48 bit CPU this range is reserved for OS usage, do not reserve
gfxBase = maxNBitValue(48 - 1) + 1; // 0x800000000000
@@ -361,7 +382,7 @@ bool GfxPartition::initAdditionalRange(uint32_t cpuVirtualAddressSize, uint64_t
}
// Init HEAP_EXTENDED only for 57 bit GPU
if (gpuAddressSpace == maxNBitValue(57)) {
if (gpuAddressSpace == maxNBitValue(57) && !isExtendedHeapInitialized) {
// Split HEAP_EXTENDED among root devices (like HEAP_STANDARD64K)
auto heapExtendedSize = alignDown((maxNBitValue(48) + 1) / numRootDevices, GfxPartition::heapGranularity);
heapInit(HeapIndex::HEAP_EXTENDED, maxNBitValue(57 - 1) + 1 + rootDeviceIndex * heapExtendedSize, heapExtendedSize);
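
The net effect on HEAP_EXTENDED is that a 57-bit-capable CPU now gets a dedicated 1 TB window reserved above the 48-bit boundary per partition, while the pre-existing path (kept for the !isExtendedHeapInitialized case) still carves the extended heap out of 57-bit GPU address space by splitting a 48-bit-sized area across root devices. A rough, self-contained sketch of that fallback split (illustration only; the 64 KB granularity is an assumed placeholder, not taken from this diff):

#include <cstdint>
#include <cstdio>

constexpr uint64_t maxNBitValue(uint64_t n) { return (1ull << n) - 1; }
constexpr uint64_t alignDown(uint64_t value, uint64_t alignment) { return value & ~(alignment - 1); }
constexpr uint64_t heapGranularity = 64 * 1024; // assumed placeholder for GfxPartition::heapGranularity

int main() {
    const uint64_t numRootDevices = 4;
    // Same formula as the fallback branch above: split a 48-bit-sized area among root devices.
    const uint64_t heapExtendedSize = alignDown((maxNBitValue(48) + 1) / numRootDevices, heapGranularity);
    for (uint64_t rootDeviceIndex = 0; rootDeviceIndex < numRootDevices; ++rootDeviceIndex) {
        const uint64_t heapBase = maxNBitValue(57 - 1) + 1 + rootDeviceIndex * heapExtendedSize;
        std::printf("root device %llu: HEAP_EXTENDED base 0x%llx, size 0x%llx\n",
                    static_cast<unsigned long long>(rootDeviceIndex),
                    static_cast<unsigned long long>(heapBase),
                    static_cast<unsigned long long>(heapExtendedSize));
    }
    return 0;
}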

View File

@@ -35,7 +35,7 @@ enum class HeapIndex : uint32_t {
class GfxPartition {
public:
GfxPartition(OSMemory::ReservedCpuAddressRange &sharedReservedCpuAddressRange);
GfxPartition(OSMemory::ReservedCpuAddressRange &reservedCpuAddressRangeForHeapSvm);
MOCKABLE_VIRTUAL ~GfxPartition();
bool init(uint64_t gpuAddressSpace, size_t cpuAddressRangeSizeToReserve, uint32_t rootDeviceIndex, size_t numRootDevices) {
@@ -125,8 +125,8 @@ class GfxPartition {
std::array<Heap, static_cast<uint32_t>(HeapIndex::TOTAL_HEAPS)> heaps;
OSMemory::ReservedCpuAddressRange &reservedCpuAddressRange;
OSMemory::ReservedCpuAddressRange &reservedCpuAddressRangeForHeapSvm;
OSMemory::ReservedCpuAddressRange reservedCpuAddressRangeForHeapExtended{};
std::unique_ptr<OSMemory> osMemory;
};
} // namespace NEO

View File

@@ -1,5 +1,5 @@
/*
* Copyright (C) 2019-2021 Intel Corporation
* Copyright (C) 2019-2023 Intel Corporation
*
* SPDX-License-Identifier: MIT
*
@@ -8,6 +8,7 @@
#include "shared/source/os_interface/linux/os_memory_linux.h"
#include "shared/source/os_interface/linux/os_inc.h"
#include "shared/source/os_interface/linux/sys_calls.h"
#include <cinttypes>
#include <fstream>
@@ -28,11 +29,11 @@ void OSMemoryLinux::osReleaseCpuAddressRange(void *reservedCpuAddressRange, size
}
void *OSMemoryLinux::mmapWrapper(void *addr, size_t size, int prot, int flags, int fd, off_t off) {
return mmap(addr, size, prot, flags, fd, off);
return SysCalls::mmap(addr, size, prot, flags, fd, off);
}
int OSMemoryLinux::munmapWrapper(void *addr, size_t size) {
return munmap(addr, size);
return SysCalls::munmap(addr, size);
}
void OSMemoryLinux::getMemoryMaps(MemoryMaps &memoryMaps) {

View File

@@ -26,8 +26,8 @@ int poll(struct pollfd *pollFd, unsigned long int numberOfFds, int timeout);
int fstat(int fd, struct stat *buf);
ssize_t pread(int fd, void *buf, size_t count, off_t offset);
ssize_t pwrite(int fd, const void *buf, size_t count, off_t offset);
void *mmap(void *addr, size_t size, int prot, int flags, int fd, off_t off);
int munmap(void *addr, size_t size);
void *mmap(void *addr, size_t size, int prot, int flags, int fd, off_t off) noexcept;
int munmap(void *addr, size_t size) noexcept;
ssize_t read(int fd, void *buf, size_t count);
int fcntl(int fd, int cmd);
int fcntl(int fd, int cmd, int arg);

View File

@@ -81,11 +81,11 @@ ssize_t pwrite(int fd, const void *buf, size_t count, off_t offset) {
return ::pwrite(fd, buf, count, offset);
}
void *mmap(void *addr, size_t size, int prot, int flags, int fd, off_t off) {
void *mmap(void *addr, size_t size, int prot, int flags, int fd, off_t off) noexcept {
return ::mmap(addr, size, prot, flags, fd, off);
}
int munmap(void *addr, size_t size) {
int munmap(void *addr, size_t size) noexcept {
return ::munmap(addr, size);
}

View File

@@ -23,14 +23,13 @@ off_t lseekReturn = 4096u;
std::atomic<int> lseekCalledCount(0);
std::atomic<int> closeInputFd(0);
std::atomic<int> closeCalledCount(0);
std::vector<void *> mmapVector(64);
TestedDrmMemoryManager::TestedDrmMemoryManager(ExecutionEnvironment &executionEnvironment) : MemoryManagerCreate(gemCloseWorkerMode::gemCloseWorkerInactive,
false,
false,
executionEnvironment) {
this->mmapFunction = &mmapMock;
this->munmapFunction = &munmapMock;
this->mmapFunction = SysCalls::mmap;
this->munmapFunction = SysCalls::munmap;
this->lseekFunction = &lseekMock;
this->closeFunction = &closeMock;
lseekReturn = 4096;
@@ -48,8 +47,8 @@ TestedDrmMemoryManager::TestedDrmMemoryManager(bool enableLocalMemory,
allowForcePin,
validateHostPtrMemory,
executionEnvironment) {
this->mmapFunction = &mmapMock;
this->munmapFunction = &munmapMock;
this->mmapFunction = SysCalls::mmap;
this->munmapFunction = SysCalls::munmap;
this->lseekFunction = &lseekMock;
this->closeFunction = &closeMock;
lseekReturn = 4096;

View File

@@ -6,9 +6,9 @@
*/
#pragma once
#include "shared/source/helpers/aligned_memory.h"
#include "shared/source/os_interface/linux/drm_gem_close_worker.h"
#include "shared/source/os_interface/linux/drm_memory_manager.h"
#include "shared/source/os_interface/linux/sys_calls.h"
#include "shared/test/common/mocks/mock_memory_manager.h"
#include "shared/test/common/os_interface/linux/device_command_stream_fixture.h"
@@ -19,30 +19,6 @@ extern off_t lseekReturn;
extern std::atomic<int> lseekCalledCount;
extern std::atomic<int> closeInputFd;
extern std::atomic<int> closeCalledCount;
extern std::vector<void *> mmapVector;
inline void *mmapMock(void *addr, size_t length, int prot, int flags, int fd, off_t offset) noexcept {
if (addr) {
return addr;
}
void *ptr = nullptr;
if (length > 0) {
ptr = alignedMalloc(length, MemoryConstants::pageSize64k);
mmapVector.push_back(ptr);
}
return ptr;
}
inline int munmapMock(void *addr, size_t length) noexcept {
if (length > 0) {
auto ptrIt = std::find(mmapVector.begin(), mmapVector.end(), addr);
if (ptrIt != mmapVector.end()) {
mmapVector.erase(ptrIt);
alignedFree(addr);
}
}
return 0;
}
inline off_t lseekMock(int fd, off_t offset, int whence) noexcept {
lseekCalledCount++;

View File

@@ -7,6 +7,7 @@
#include "shared/test/common/os_interface/linux/sys_calls_linux_ult.h"
#include "shared/source/helpers/aligned_memory.h"
#include "shared/source/helpers/string.h"
#include "shared/source/os_interface/linux/drm_wrappers.h"
#include "shared/source/os_interface/linux/i915.h"
@@ -25,6 +26,7 @@
#include <system_error>
namespace NEO {
std::vector<void *> mmapVector(64);
namespace SysCalls {
uint32_t closeFuncCalled = 0u;
int closeFuncArgPassed = 0;
@@ -187,13 +189,29 @@ ssize_t pwrite(int fd, const void *buf, size_t count, off_t offset) {
return 0;
}
void *mmap(void *addr, size_t size, int prot, int flags, int fd, off_t off) {
void *mmap(void *addr, size_t size, int prot, int flags, int fd, off_t off) noexcept {
mmapFuncCalled++;
return 0;
if (addr) {
return addr;
}
void *ptr = nullptr;
if (size > 0) {
ptr = alignedMalloc(size, MemoryConstants::pageSize64k);
if (!ptr) {
return reinterpret_cast<void *>(0x1000);
}
mmapVector.push_back(ptr);
}
return ptr;
}
int munmap(void *addr, size_t size) {
int munmap(void *addr, size_t size) noexcept {
munmapFuncCalled++;
auto ptrIt = std::find(mmapVector.begin(), mmapVector.end(), addr);
if (ptrIt != mmapVector.end()) {
mmapVector.erase(ptrIt);
alignedFree(addr);
}
return 0;
}
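
The mocked mmap above keeps the two properties the DRM memory-manager tests rely on: an explicit address hint is returned unchanged, and hint-less calls hand out a 64 KB-aligned block that is tracked in mmapVector so the matching munmap can release it. A compressed, self-contained model of that contract (illustration only; std::aligned_alloc/std::free stand in for the NEO alignedMalloc/alignedFree helpers):

#include <algorithm>
#include <cstdlib>
#include <vector>

constexpr size_t pageSize64k = 64 * 1024;
std::vector<void *> trackedMappings;

void *mmapModel(void *addr, size_t size) {
    if (addr) {
        return addr; // address hints are honored verbatim, as in the mock above
    }
    void *ptr = nullptr;
    if (size > 0) {
        size_t roundedSize = ((size + pageSize64k - 1) / pageSize64k) * pageSize64k;
        ptr = std::aligned_alloc(pageSize64k, roundedSize);
        trackedMappings.push_back(ptr); // remember what was handed out
    }
    return ptr;
}

int munmapModel(void *addr) {
    auto it = std::find(trackedMappings.begin(), trackedMappings.end(), addr);
    if (it != trackedMappings.end()) {
        trackedMappings.erase(it);
        std::free(addr); // only release pointers this model allocated
    }
    return 0;
}

int main() {
    void *ptr = mmapModel(nullptr, 4096); // no hint: returns a tracked, 64 KB-aligned block
    munmapModel(ptr);                     // released because it is found in trackedMappings
    return 0;
}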

View File

@@ -1,10 +1,11 @@
/*
* Copyright (C) 2021 Intel Corporation
* Copyright (C) 2021-2023 Intel Corporation
*
* SPDX-License-Identifier: MIT
*
*/
#include "shared/source/helpers/ptr_math.h"
#include "shared/source/os_interface/windows/os_memory_win.h"
#include "shared/test/common/mocks/mock_wddm.h"
@@ -15,6 +16,9 @@ BOOL WINAPI ULTVirtualFree(LPVOID ptr, SIZE_T size, DWORD flags) {
}
LPVOID WINAPI ULTVirtualAlloc(LPVOID inPtr, SIZE_T size, DWORD flags, DWORD type) {
if (castToUint64(inPtr) > maxNBitValue(48)) {
return inPtr;
}
return reinterpret_cast<LPVOID>(virtualAllocAddress);
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (C) 2019-2022 Intel Corporation
* Copyright (C) 2019-2023 Intel Corporation
*
* SPDX-License-Identifier: MIT
*
@@ -609,12 +609,12 @@ TEST_P(GfxPartitionOn57bTest, given57bitCpuAddressWidthAndLa57IsNotPresentWhenIn
verifyHeaps(0x800000000000, 0x1000000000000, 0x7FFFFFFFFFFF, gpuAddressSpace == 57);
}
TEST_P(GfxPartitionOn57bTest, given57bitCpuAddressWidthAndLa57IsPresentWhenInitializingGfxPartitionThenReserve48bitSpaceForDriverAllocations) {
TEST_F(GfxPartitionOn57bTest, given57bitCpuAddressWidthAndLa57IsPresentWhenInitializingGfxPartitionThenReserve48bitSpaceForDriverAllocations) {
if (is32bit) {
GTEST_SKIP();
}
auto gpuAddressSpace = GetParam();
auto gpuAddressSpace = 48;
// 57 bit CPU VA, la57 flag is present - reserve high or low CPU address range depending on memory maps
CpuInfoOverrideVirtualAddressSizeAndFlags overrideCpuInfo(57, "la57");
@@ -802,12 +802,12 @@ TEST_P(GfxPartitionOn57bTest, given57bitCpuAddressWidthAndLa57IsPresentWhenIniti
EXPECT_FALSE(gfxPartition->init(maxNBitValue(gpuAddressSpace), 0, 0, 1));
}
TEST_P(GfxPartitionOn57bTest, given57bitCpuAddressWidthWhenInitializingMultipleGfxPartitionsThenReserve48bitSpaceForDriverAllocationsOnlyOnce) {
TEST_F(GfxPartitionOn57bTest, given48bitGpuAddressSpaceAnd57bitCpuAddressWidthWhenInitializingMultipleGfxPartitionsThenReserveSpaceForSvmHeapOnlyOnce) {
if (is32bit) {
GTEST_SKIP();
}
auto gpuAddressSpace = GetParam();
auto gpuAddressSpace = 48;
// 57 bit CPU VA, la57 is present - reserve high or low CPU address range depending on memory maps
CpuInfoOverrideVirtualAddressSizeAndFlags overrideCpuInfo(57, "la57");
@@ -823,7 +823,26 @@ TEST_P(GfxPartitionOn57bTest, given57bitCpuAddressWidthWhenInitializingMultipleG
EXPECT_EQ(1u, static_cast<MockOsMemory *>(gfxPartitions[0]->osMemory.get())->getReserveCount());
}
INSTANTIATE_TEST_SUITE_P(GfxPartitionOn57bTest, GfxPartitionOn57bTest, ::testing::Values(48, 57));
TEST_F(GfxPartitionOn57bTest, given57bitGpuAddressSpaceAnd57bitCpuAddressWidthWhenInitializingMultipleGfxPartitionsThenReserveSpaceForSvmHeapAndExtendedHeapsPerGfxPartition) {
if (is32bit) {
GTEST_SKIP();
}
auto gpuAddressSpace = 57;
// 57 bit CPU VA, la57 is present - reserve high or low CPU address range depending on memory maps
CpuInfoOverrideVirtualAddressSizeAndFlags overrideCpuInfo(57, "la57");
OSMemory::ReservedCpuAddressRange reservedCpuAddressRange;
std::vector<std::unique_ptr<MockGfxPartition>> gfxPartitions;
for (int i = 0; i < 10; ++i) {
gfxPartitions.push_back(std::make_unique<MockGfxPartition>(reservedCpuAddressRange));
gfxPartitions[i]->osMemory.reset(new MockOsMemory);
EXPECT_TRUE(gfxPartitions[i]->init(maxNBitValue(gpuAddressSpace), 0, i, 10));
}
EXPECT_EQ(11u, static_cast<MockOsMemory *>(gfxPartitions[0]->osMemory.get())->getReserveCount());
}
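
The expected count of 11 reservations follows directly from the loop above: the shared SVM range is reserved once for all ten partitions, and each of the ten partitions additionally reserves its own 1 TB HEAP_EXTENDED window. A trivial check of that arithmetic (not part of the test suite):

#include <cassert>
#include <cstdint>

int main() {
    const uint32_t numGfxPartitions = 10;
    const uint32_t svmReservations = 1;                      // shared ReservedCpuAddressRange, reserved once
    const uint32_t extendedReservations = numGfxPartitions;  // one 1 TB HEAP_EXTENDED window per partition
    assert(svmReservations + extendedReservations == 11u);   // matches EXPECT_EQ(11u, ...) above
    return 0;
}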
TEST(GfxPartitionTest, givenGpuAddressSpaceIs57BitAndSeveralRootDevicesThenHeapExtendedIsSplitted) {
if (is32bit) {
@@ -840,11 +859,10 @@ TEST(GfxPartitionTest, givenGpuAddressSpaceIs57BitAndSeveralRootDevicesThenHeapE
MockGfxPartition gfxPartition;
EXPECT_TRUE(gfxPartition.init(maxNBitValue(57), reservedCpuAddressRangeSize, rootDeviceIndex, numRootDevices));
auto heapExtendedTotalSize = maxNBitValue(48) + 1;
auto heapExtendedSize = alignDown(heapExtendedTotalSize / numRootDevices, GfxPartition::heapGranularity);
auto heapExtendedSize = 1024 * MemoryConstants::gigaByte;
EXPECT_EQ(heapExtendedSize, gfxPartition.getHeapSize(HeapIndex::HEAP_EXTENDED));
EXPECT_EQ(maxNBitValue(56) + 1 + rootDeviceIndex * heapExtendedSize, gfxPartition.getHeapBase(HeapIndex::HEAP_EXTENDED));
EXPECT_LT(maxNBitValue(48), gfxPartition.getHeapBase(HeapIndex::HEAP_EXTENDED));
}
{

View File

@@ -970,8 +970,8 @@ TEST_F(DrmMemoryManagerLocalMemoryPrelimTest, givenAlignmentAndSizeWhenMmapRetur
EXPECT_EQ(3u, munmapCalledCount);
munmapCalledCount = 0u;
memoryManager->mmapFunction = &mmapMock;
memoryManager->munmapFunction = &munmapMock;
memoryManager->mmapFunction = SysCalls::mmap;
memoryManager->munmapFunction = SysCalls::munmap;
}
TEST_F(DrmMemoryManagerLocalMemoryPrelimTest, givenAlignmentAndSizeWhenMmapReturnsAlignedThenCreateAllocWithAlignmentUnmapOneUnalignedPart) {
@@ -1012,8 +1012,8 @@ TEST_F(DrmMemoryManagerLocalMemoryPrelimTest, givenAlignmentAndSizeWhenMmapRetur
EXPECT_EQ(2u, munmapCalledCount);
munmapCalledCount = 0u;
memoryManager->mmapFunction = &mmapMock;
memoryManager->munmapFunction = &munmapMock;
memoryManager->mmapFunction = SysCalls::mmap;
memoryManager->munmapFunction = SysCalls::munmap;
}
TEST_F(DrmMemoryManagerLocalMemoryPrelimTest, givenInvalidCacheRegionWhenMmapReturnsUnalignedPointerThenReleaseUnalignedPartsEarly) {
@@ -1052,8 +1052,8 @@ TEST_F(DrmMemoryManagerLocalMemoryPrelimTest, givenInvalidCacheRegionWhenMmapRet
EXPECT_EQ(2u, munmapCalledCount);
munmapCalledCount = 0u;
memoryManager->mmapFunction = &mmapMock;
memoryManager->munmapFunction = &munmapMock;
memoryManager->mmapFunction = SysCalls::mmap;
memoryManager->munmapFunction = SysCalls::munmap;
}
TEST_F(DrmMemoryManagerLocalMemoryPrelimTest, givenMemoryInfoAndFailedMmapOffsetWhenAllocateWithAlignmentThenNullptr) {

View File

@@ -7,6 +7,7 @@
#include "shared/source/execution_environment/execution_environment.h"
#include "shared/source/gmm_helper/gmm_helper.h"
#include "shared/source/helpers/aligned_memory.h"
#include "shared/source/helpers/heap_assigner.h"
#include "shared/source/memory_manager/gfx_partition.h"
#include "shared/source/memory_manager/memory_banks.h"