Mirror of https://github.com/intel/compute-runtime.git (synced 2025-12-19 16:24:18 +08:00)
Fix typos:

preffered -> preferred
deffered -> deferred

Change-Id: I1b87861590c273d7fcda5bf0c5a772bf36e1bc74
Signed-off-by: Mateusz Jablonski <mateusz.jablonski@intel.com>
commit c6239968a5 (parent c43759cc48), committed by sys_ocldev
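A quick way to confirm a rename like this is complete across the tree (a hypothetical follow-up check, not part of the commit) is to grep for the old spellings and verify nothing remains:

    git grep -niE 'preffered|deffered'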
@@ -11,6 +11,6 @@
 
 using namespace NEO;
 
-TEST(DeferredDeleterHelper, GivenDefferedDeleterHelperWhenCheckIFDeferrDeleterIsEnabledThenFalseIsReturned) {
+TEST(DeferredDeleterHelper, GivenDeferredDeleterHelperWhenCheckIFDeferrDeleterIsEnabledThenFalseIsReturned) {
     EXPECT_FALSE(isDeferredDeleterEnabled());
 }
@@ -31,7 +31,7 @@ struct MetricGroupDomains {
     bool isActivated(const zet_metric_group_handle_t hMetricGroup);
 
   protected:
-    bool activateMetricGroupDeffered(const zet_metric_group_handle_t hMetricGroup);
+    bool activateMetricGroupDeferred(const zet_metric_group_handle_t hMetricGroup);
     bool activateEventMetricGroup(const zet_metric_group_handle_t hMetricGroup);
 
   protected:
@@ -229,7 +229,7 @@ ze_result_t MetricGroupDomains::activateDeferred(const uint32_t count,
         DEBUG_BREAK_IF(!phMetricGroups[i]);
 
         // Try to associate it with a domain (oa, ...).
-        if (!activateMetricGroupDeffered(phMetricGroups[i])) {
+        if (!activateMetricGroupDeferred(phMetricGroups[i])) {
             return ZE_RESULT_ERROR_UNKNOWN;
         }
     }
@@ -237,7 +237,7 @@ ze_result_t MetricGroupDomains::activateDeferred(const uint32_t count,
     return ZE_RESULT_SUCCESS;
 }
 
-bool MetricGroupDomains::activateMetricGroupDeffered(const zet_metric_group_handle_t hMetricGroup) {
+bool MetricGroupDomains::activateMetricGroupDeferred(const zet_metric_group_handle_t hMetricGroup) {
 
     const auto properites = MetricGroup::getProperties(hMetricGroup);
     const auto domain = properites.domain;
@@ -603,7 +603,7 @@ bool CommandQueue::bufferCpuCopyAllowed(Buffer *buffer, cl_command_type commandT
     }
 
     //check if it is beneficial to do transfer on CPU
-    if (!buffer->isReadWriteOnCpuPreffered(ptr, size, getDevice())) {
+    if (!buffer->isReadWriteOnCpuPreferred(ptr, size, getDevice())) {
         return false;
     }
 
@@ -564,7 +564,7 @@ bool Buffer::isReadWriteOnCpuAllowed(uint32_t rootDeviceIndex) {
     return true;
 }
 
-bool Buffer::isReadWriteOnCpuPreffered(void *ptr, size_t size, const Device &device) {
+bool Buffer::isReadWriteOnCpuPreferred(void *ptr, size_t size, const Device &device) {
     auto graphicsAllocation = multiGraphicsAllocation.getGraphicsAllocation(device.getRootDeviceIndex());
     if (MemoryPool::isSystemMemoryPool(graphicsAllocation->getMemoryPool())) {
         //if buffer is not zero copy and pointer is aligned it will be more beneficial to do the transfer on GPU
@@ -149,7 +149,7 @@ class Buffer : public MemObj {
    void transferDataFromHostPtr(MemObjSizeArray &copySize, MemObjOffsetArray &copyOffset) override;
 
     bool isReadWriteOnCpuAllowed(uint32_t rootDeviceIndex);
-    bool isReadWriteOnCpuPreffered(void *ptr, size_t size, const Device &device);
+    bool isReadWriteOnCpuPreferred(void *ptr, size_t size, const Device &device);
 
     uint32_t getMocsValue(bool disableL3Cache, bool isReadOnlyArgument) const;
     uint32_t getSurfaceSize(bool alignSizeForAuxTranslation) const;
@@ -30,11 +30,11 @@ HWTEST_F(ReadWriteBufferCpuCopyTest, givenRenderCompressedGmmWhenAskingForCpuOpe
     auto unalignedPtr = ptrOffset(alignedPtr, 1);
     EXPECT_EQ(1u, allocation->storageInfo.getNumBanks());
     EXPECT_TRUE(buffer->isReadWriteOnCpuAllowed(rootDeviceIndex));
-    EXPECT_TRUE(buffer->isReadWriteOnCpuPreffered(unalignedPtr, 1, *pDevice));
+    EXPECT_TRUE(buffer->isReadWriteOnCpuPreferred(unalignedPtr, 1, *pDevice));
 
     gmm->isRenderCompressed = true;
     EXPECT_FALSE(buffer->isReadWriteOnCpuAllowed(rootDeviceIndex));
-    EXPECT_TRUE(buffer->isReadWriteOnCpuPreffered(unalignedPtr, 1, *pDevice));
+    EXPECT_TRUE(buffer->isReadWriteOnCpuPreferred(unalignedPtr, 1, *pDevice));
 
     alignedFree(alignedPtr);
 }
@@ -58,7 +58,7 @@ HWTEST_F(ReadWriteBufferCpuCopyTest, GivenUnalignedReadPtrWhenReadingBufferThenM
     bool aligned = (reinterpret_cast<uintptr_t>(unalignedReadPtr) & (MemoryConstants::cacheLineSize - 1)) == 0;
     EXPECT_TRUE(!aligned || buffer->isMemObjZeroCopy());
     ASSERT_TRUE(buffer->isReadWriteOnCpuAllowed(pCmdQ->getDevice().getRootDeviceIndex()));
-    ASSERT_TRUE(buffer->isReadWriteOnCpuPreffered(unalignedReadPtr, size, context->getDevice(0)->getDevice()));
+    ASSERT_TRUE(buffer->isReadWriteOnCpuPreferred(unalignedReadPtr, size, context->getDevice(0)->getDevice()));
 
     retVal = EnqueueReadBufferHelper<>::enqueueReadBuffer(pCmdQ,
                                                           buffer.get(),
@@ -99,7 +99,7 @@ HWTEST_F(ReadWriteBufferCpuCopyTest, GivenUnalignedSrcPtrWhenWritingBufferThenMe
     bool aligned = (reinterpret_cast<uintptr_t>(unalignedWritePtr) & (MemoryConstants::cacheLineSize - 1)) == 0;
     EXPECT_TRUE(!aligned || buffer->isMemObjZeroCopy());
     ASSERT_TRUE(buffer->isReadWriteOnCpuAllowed(pCmdQ->getDevice().getRootDeviceIndex()));
-    ASSERT_TRUE(buffer->isReadWriteOnCpuPreffered(unalignedWritePtr, size, context->getDevice(0)->getDevice()));
+    ASSERT_TRUE(buffer->isReadWriteOnCpuPreferred(unalignedWritePtr, size, context->getDevice(0)->getDevice()));
 
     retVal = EnqueueWriteBufferHelper<>::enqueueWriteBuffer(pCmdQ,
                                                             buffer.get(),
@@ -144,31 +144,31 @@ HWTEST_F(ReadWriteBufferCpuCopyTest, GivenSpecificMemoryStructuresWhenReadingWri
     EXPECT_TRUE(buffer->isMemObjZeroCopy());
 
     // zeroCopy == true && aligned/unaligned hostPtr
-    EXPECT_TRUE(buffer->isReadWriteOnCpuPreffered(alignedHostPtr, MemoryConstants::cacheLineSize + 1, mockDevice->getDevice()));
-    EXPECT_TRUE(buffer->isReadWriteOnCpuPreffered(unalignedHostPtr, MemoryConstants::cacheLineSize, mockDevice->getDevice()));
+    EXPECT_TRUE(buffer->isReadWriteOnCpuPreferred(alignedHostPtr, MemoryConstants::cacheLineSize + 1, mockDevice->getDevice()));
+    EXPECT_TRUE(buffer->isReadWriteOnCpuPreferred(unalignedHostPtr, MemoryConstants::cacheLineSize, mockDevice->getDevice()));
 
     buffer.reset(Buffer::create(context, CL_MEM_USE_HOST_PTR, size, unalignedBufferPtr, retVal));
 
     EXPECT_EQ(retVal, CL_SUCCESS);
 
     // zeroCopy == false && unaligned hostPtr
-    EXPECT_TRUE(buffer->isReadWriteOnCpuPreffered(unalignedHostPtr, MemoryConstants::cacheLineSize, mockDevice->getDevice()));
+    EXPECT_TRUE(buffer->isReadWriteOnCpuPreferred(unalignedHostPtr, MemoryConstants::cacheLineSize, mockDevice->getDevice()));
 
     buffer.reset(Buffer::create(mockContext.get(), CL_MEM_USE_HOST_PTR, 1 * MB, smallBufferPtr, retVal));
 
     // platform LP == true && size <= 10 MB
     mockDevice->deviceInfo.platformLP = true;
-    EXPECT_TRUE(buffer->isReadWriteOnCpuPreffered(smallBufferPtr, 1 * MB, mockDevice->getDevice()));
+    EXPECT_TRUE(buffer->isReadWriteOnCpuPreferred(smallBufferPtr, 1 * MB, mockDevice->getDevice()));
 
     // platform LP == false && size <= 10 MB
     mockDevice->deviceInfo.platformLP = false;
-    EXPECT_TRUE(buffer->isReadWriteOnCpuPreffered(smallBufferPtr, 1 * MB, mockDevice->getDevice()));
+    EXPECT_TRUE(buffer->isReadWriteOnCpuPreferred(smallBufferPtr, 1 * MB, mockDevice->getDevice()));
 
     buffer.reset(Buffer::create(mockContext.get(), CL_MEM_ALLOC_HOST_PTR, largeBufferSize, nullptr, retVal));
 
     // platform LP == false && size > 10 MB
     mockDevice->deviceInfo.platformLP = false;
-    EXPECT_TRUE(buffer->isReadWriteOnCpuPreffered(buffer->getCpuAddress(), largeBufferSize, mockDevice->getDevice()));
+    EXPECT_TRUE(buffer->isReadWriteOnCpuPreferred(buffer->getCpuAddress(), largeBufferSize, mockDevice->getDevice()));
 
     alignedFree(smallBufferPtr);
     alignedFree(alignedHostPtr);
@@ -203,13 +203,13 @@ HWTEST_F(ReadWriteBufferCpuCopyTest, GivenSpecificMemoryStructuresWhenReadingWri
     EXPECT_EQ(retVal, CL_SUCCESS);
 
     // zeroCopy == false && aligned hostPtr
-    EXPECT_FALSE(buffer->isReadWriteOnCpuPreffered(alignedHostPtr, MemoryConstants::cacheLineSize + 1, mockDevice->getDevice()));
+    EXPECT_FALSE(buffer->isReadWriteOnCpuPreferred(alignedHostPtr, MemoryConstants::cacheLineSize + 1, mockDevice->getDevice()));
 
     buffer.reset(Buffer::create(mockContext.get(), CL_MEM_ALLOC_HOST_PTR, largeBufferSize, nullptr, retVal));
 
     // platform LP == true && size > 10 MB
     mockDevice->deviceInfo.platformLP = true;
-    EXPECT_FALSE(buffer->isReadWriteOnCpuPreffered(buffer->getCpuAddress(), largeBufferSize, mockDevice->getDevice()));
+    EXPECT_FALSE(buffer->isReadWriteOnCpuPreferred(buffer->getCpuAddress(), largeBufferSize, mockDevice->getDevice()));
 
     alignedFree(alignedHostPtr);
     alignedFree(alignedBufferPtr);
@@ -255,11 +255,11 @@ TEST(ReadWriteBufferOnCpu, givenNoHostPtrAndAlignedSizeWhenMemoryAllocationIsInN
     ASSERT_NE(nullptr, buffer.get());
 
     EXPECT_TRUE(buffer->isReadWriteOnCpuAllowed(device->getRootDeviceIndex()));
-    EXPECT_TRUE(buffer->isReadWriteOnCpuPreffered(reinterpret_cast<void *>(0x1000), MemoryConstants::pageSize, device->getDevice()));
+    EXPECT_TRUE(buffer->isReadWriteOnCpuPreferred(reinterpret_cast<void *>(0x1000), MemoryConstants::pageSize, device->getDevice()));
     reinterpret_cast<MemoryAllocation *>(buffer->getGraphicsAllocation(device->getRootDeviceIndex()))->overrideMemoryPool(MemoryPool::SystemCpuInaccessible);
-    //read write on CPU is allowed, but not preffered. We can access this memory via Lock.
+    //read write on CPU is allowed, but not preferred. We can access this memory via Lock.
     EXPECT_TRUE(buffer->isReadWriteOnCpuAllowed(device->getRootDeviceIndex()));
-    EXPECT_FALSE(buffer->isReadWriteOnCpuPreffered(reinterpret_cast<void *>(0x1000), MemoryConstants::pageSize, device->getDevice()));
+    EXPECT_FALSE(buffer->isReadWriteOnCpuPreferred(reinterpret_cast<void *>(0x1000), MemoryConstants::pageSize, device->getDevice()));
 }
 
 TEST(ReadWriteBufferOnCpu, givenPointerThatRequiresCpuCopyWhenCpuCopyIsEvaluatedThenTrueIsReturned) {
@@ -313,5 +313,5 @@ TEST(ReadWriteBufferOnCpu, whenLocalMemoryPoolAllocationIsAskedForPreferenceThen
     reinterpret_cast<MemoryAllocation *>(buffer->getGraphicsAllocation(device->getRootDeviceIndex()))->overrideMemoryPool(MemoryPool::LocalMemory);
 
     EXPECT_TRUE(buffer->isReadWriteOnCpuAllowed(device->getRootDeviceIndex()));
-    EXPECT_FALSE(buffer->isReadWriteOnCpuPreffered(reinterpret_cast<void *>(0x1000), MemoryConstants::pageSize, device->getDevice()));
+    EXPECT_FALSE(buffer->isReadWriteOnCpuPreferred(reinterpret_cast<void *>(0x1000), MemoryConstants::pageSize, device->getDevice()));
 }
@@ -12,12 +12,12 @@
 
 using namespace NEO;
 
-TEST(deferredDeleterHelper, DefferedDeleterIsDisabledWhenCheckIFDeferrDeleterIsEnabledThenCorrectValueReturned) {
+TEST(deferredDeleterHelper, DeferredDeleterIsDisabledWhenCheckIFDeferrDeleterIsEnabledThenCorrectValueReturned) {
     DebugManagerStateRestore dbgRestore;
     DebugManager.flags.EnableDeferredDeleter.set(false);
     EXPECT_FALSE(isDeferredDeleterEnabled());
 }
-TEST(deferredDeleterHelper, DefferedDeleterIsEnabledWhenCheckIFDeferrDeleterIsEnabledThenCorrectValueReturned) {
+TEST(deferredDeleterHelper, DeferredDeleterIsEnabledWhenCheckIFDeferrDeleterIsEnabledThenCorrectValueReturned) {
     DebugManagerStateRestore dbgRestore;
     DebugManager.flags.EnableDeferredDeleter.set(true);
     EXPECT_TRUE(isDeferredDeleterEnabled());
@@ -585,7 +585,7 @@ TEST_F(KernelPrivateSurfaceTest, WhenChangingResidencyThenCsrResidencySizeIsUpda
     delete pKernel;
 }
 
-TEST_F(KernelPrivateSurfaceTest, givenKernelWithPrivateSurfaceThatIsInUseByGpuWhenKernelIsBeingDestroyedThenAllocationIsAddedToDefferedFreeList) {
+TEST_F(KernelPrivateSurfaceTest, givenKernelWithPrivateSurfaceThatIsInUseByGpuWhenKernelIsBeingDestroyedThenAllocationIsAddedToDeferredFreeList) {
     auto pKernelInfo = std::make_unique<KernelInfo>();
     SPatchAllocateStatelessPrivateSurface tokenSPS;
     tokenSPS.SurfaceStateHeapOffset = 64;
@@ -680,7 +680,7 @@ TEST_P(ProgramFromBinaryTest, givenProgramWhenCleanKernelInfoIsCalledThenKernelA
     EXPECT_EQ(0u, pProgram->getNumKernels());
 }
 
-HWTEST_P(ProgramFromBinaryTest, givenProgramWhenCleanCurrentKernelInfoIsCalledButGpuIsNotYetDoneThenKernelAllocationIsPutOnDefferedFreeListAndCsrRegistersCacheFlush) {
+HWTEST_P(ProgramFromBinaryTest, givenProgramWhenCleanCurrentKernelInfoIsCalledButGpuIsNotYetDoneThenKernelAllocationIsPutOnDeferredFreeListAndCsrRegistersCacheFlush) {
     cl_device_id device = pClDevice;
     auto &csr = pDevice->getGpgpuCommandStreamReceiver();
     EXPECT_TRUE(csr.getTemporaryAllocations().peekIsEmpty());
@@ -151,7 +151,7 @@ TEST_F(CompilerInterfaceTest, WhenCompilingToIsaThenSuccessIsReturned) {
     EXPECT_EQ(TranslationOutput::ErrorCode::Success, err);
 }
 
-TEST_F(CompilerInterfaceTest, WhenPrefferedIntermediateRepresentationSpecifiedThenPreserveIt) {
+TEST_F(CompilerInterfaceTest, WhenPreferredIntermediateRepresentationSpecifiedThenPreserveIt) {
     TranslationOutput translationOutput;
     inputArgs.preferredIntermediateType = IGC::CodeType::llvmLl;
     auto err = pCompilerInterface->build(*pDevice, inputArgs, translationOutput);