Mirror of https://github.com/intel/compute-runtime.git, synced 2026-01-03 06:49:52 +08:00.
refactor: remove not needed code
Signed-off-by: Mrozek, Michal <michal.mrozek@intel.com>
Committed by: Compute-Runtime-Automation
Parent: fff97d3958
Commit: f71f6d2b72
@@ -190,7 +190,6 @@ NEO::CompletionStamp CommandListCoreFamilyImmediate<gfxCoreFamily>::flushImmedia
     args.numAvailableDevices = neoDevice->getNumGenericSubDevices();
     args.allocation = this->device->getDebugSurface();
     args.gmmHelper = neoDevice->getGmmHelper();
-    args.useGlobalAtomics = false;
     args.areMultipleSubDevicesInContext = false;
     args.isDebuggerActive = true;
     NEO::EncodeSurfaceState<GfxFamily>::encodeBuffer(args);
@@ -242,7 +241,6 @@ NEO::CompletionStamp CommandListCoreFamilyImmediate<gfxCoreFamily>::flushRegular
     this->csr->isNTo1SubmissionModelEnabled(), // outOfOrderExecutionAllowed
     false, // epilogueRequired
     false, // usePerDssBackedBuffer
-    false, // useGlobalAtomics
     this->device->getNEODevice()->getNumGenericSubDevices() > 1, // areMultipleSubDevicesInContext
     false, // memoryMigrationRequired
     false, // textureCacheFlush
@@ -304,7 +302,6 @@ NEO::CompletionStamp CommandListCoreFamilyImmediate<gfxCoreFamily>::flushRegular
     args.numAvailableDevices = neoDevice->getNumGenericSubDevices();
     args.allocation = this->device->getDebugSurface();
     args.gmmHelper = neoDevice->getGmmHelper();
-    args.useGlobalAtomics = false;
     args.areMultipleSubDevicesInContext = false;
     args.isDebuggerActive = true;
     NEO::EncodeSurfaceState<GfxFamily>::encodeBuffer(args);
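For readers skimming the hunks above: the debug-surface descriptor is filled through a single args bundle and then encoded in one call, and this commit simply drops the useGlobalAtomics field from that bundle. The sketch below is a cut-down illustration of that pattern, not the real NEO::EncodeSurfaceStateArgs; only the field names visible in the hunks are taken from the diff, everything else is assumed.

#include <cstddef>
#include <cstdint>
#include <cstring>

// Illustrative stand-in for the encode-args bundle; the real struct has many
// more members than the handful shown in the hunks above.
struct SurfaceStateArgs {
    const void *allocation = nullptr;  // e.g. the per-device debug surface
    const void *gmmHelper = nullptr;   // device-owned memory-layout helper
    uint32_t numAvailableDevices = 1u;
    bool areMultipleSubDevicesInContext = false;
    bool isDebuggerActive = false;
    // bool useGlobalAtomics;          // field removed by this commit
};

// Hypothetical encoder: a real implementation writes a hardware surface state
// into surfaceStateMemory; stubbed here so the sketch stays self-contained.
inline void encodeBuffer(const SurfaceStateArgs &args, void *surfaceStateMemory, std::size_t size) {
    std::memset(surfaceStateMemory, 0, size);
    (void)args; // descriptor layout intentionally omitted
}

inline void encodeDebugSurfaceState(const void *debugSurface, const void *gmmHelper,
                                    uint32_t subDeviceCount, void *ssMemory, std::size_t ssSize) {
    SurfaceStateArgs args{};
    args.allocation = debugSurface;
    args.gmmHelper = gmmHelper;
    args.numAvailableDevices = subDeviceCount;
    args.areMultipleSubDevicesInContext = false;
    args.isDebuggerActive = true;
    encodeBuffer(args, ssMemory, ssSize); // one call consumes the whole bundle
}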
@@ -210,7 +210,6 @@ ze_result_t CommandListCoreFamily<gfxCoreFamily>::appendLaunchKernelWithParams(K
     launchParams.isPredicate, // isPredicate
     false, // isTimestampEvent
     uncachedMocsKernel, // requiresUncachedMocs
-    false, // useGlobalAtomics
     internalUsage, // isInternal
     launchParams.isCooperative, // isCooperative
     false, // isHostScopeSignalEvent
@@ -240,7 +239,6 @@ ze_result_t CommandListCoreFamily<gfxCoreFamily>::appendLaunchKernelWithParams(K
     args.numAvailableDevices = neoDevice->getNumGenericSubDevices();
     args.allocation = device->getDebugSurface();
     args.gmmHelper = neoDevice->getGmmHelper();
-    args.useGlobalAtomics = kernelDescriptor.kernelAttributes.flags.useGlobalAtomics;
     args.areMultipleSubDevicesInContext = false;
     args.isDebuggerActive = true;
     NEO::EncodeSurfaceState<GfxFamily>::encodeBuffer(args);
@@ -328,7 +328,6 @@ ze_result_t CommandListCoreFamily<gfxCoreFamily>::appendLaunchKernelWithParams(K
     launchParams.isPredicate, // isPredicate
     isTimestampEvent, // isTimestampEvent
     uncachedMocsKernel, // requiresUncachedMocs
-    cmdListDefaultGlobalAtomics, // useGlobalAtomics
     internalUsage, // isInternal
     launchParams.isCooperative, // isCooperative
     isHostSignalScopeEvent, // isHostScopeSignalEvent
@@ -389,7 +388,6 @@ ze_result_t CommandListCoreFamily<gfxCoreFamily>::appendLaunchKernelWithParams(K
     args.numAvailableDevices = neoDevice->getNumGenericSubDevices();
     args.allocation = device->getDebugSurface();
     args.gmmHelper = neoDevice->getGmmHelper();
-    args.useGlobalAtomics = kernelDescriptor.kernelAttributes.flags.useGlobalAtomics;
     args.areMultipleSubDevicesInContext = args.numAvailableDevices > 1;
     args.implicitScaling = this->partitionCount > 1;
     args.isDebuggerActive = true;
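The launch parameters in these hunks travel as a positional, brace-initialized list with one trailing comment per field, so deleting a member means every construction site must drop exactly one line; that is why the same one-line removal repeats across the command-list code and the unit tests further down. A hedged sketch of that shape, using a made-up aggregate rather than the driver's real dispatch-args type:

// Hypothetical aggregate mirroring the flag-list shape in the hunks above; the
// real dispatch-args type has many more members and different names.
struct DispatchKernelFlags {
    bool isPredicate;
    bool isTimestampEvent;
    bool requiresUncachedMocs;
    bool isInternal;
    bool isCooperative;
    bool isHostScopeSignalEvent;
};

DispatchKernelFlags makeDispatchFlags(bool isPredicate, bool uncachedMocs,
                                      bool internalUsage, bool cooperative) {
    return DispatchKernelFlags{
        isPredicate,   // isPredicate
        false,         // isTimestampEvent
        uncachedMocs,  // requiresUncachedMocs
        // the "false, // useGlobalAtomics" entry used to sit here; with the member
        // gone, every brace-initializer in the commit shrinks by one line
        internalUsage, // isInternal
        cooperative,   // isCooperative
        false,         // isHostScopeSignalEvent
    };
}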
@@ -1366,7 +1366,6 @@ Device *Device::create(DriverHandle *driverHandle, NEO::Device *neoDevice, bool
     args.numAvailableDevices = neoDevice->getNumGenericSubDevices();
     args.allocation = device->getDebugSurface();
     args.gmmHelper = neoDevice->getGmmHelper();
-    args.useGlobalAtomics = false;
     args.areMultipleSubDevicesInContext = neoDevice->getNumGenericSubDevices() > 1;
     args.isDebuggerActive = true;
     gfxCoreHelper.encodeBufferSurfaceState(args);
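Note the two encode entry points touched by this commit: the command-list paths call the templated NEO::EncodeSurfaceState<GfxFamily>::encodeBuffer(args), resolved at compile time per GFX family, while Device::create goes through gfxCoreHelper.encodeBufferSurfaceState(args), a per-core helper resolved at run time. A rough sketch of that split with made-up types (the real interfaces carry far more than this):

#include <cstdint>

struct Args {
    bool isDebuggerActive = false;
};

// Run-time dispatch: an abstract per-core helper selected when the device is created.
struct CoreHelper {
    virtual ~CoreHelper() = default;
    virtual void encodeBufferSurfaceState(Args &args) const = 0;
};

// Compile-time dispatch: the GFX family is a template parameter, so the descriptor
// layout is fixed when the command-list code is instantiated.
template <typename GfxFamily>
struct EncodeSurfaceState {
    static void encodeBuffer(Args &args) {
        (void)args; // family-specific layout omitted in this sketch
    }
};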
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
  *
  * SPDX-License-Identifier: MIT
  *
@@ -116,7 +116,6 @@ struct KernelHw : public KernelImp {
     args.numAvailableDevices = neoDevice->getNumGenericSubDevices();
     args.allocation = alloc;
     args.gmmHelper = neoDevice->getGmmHelper();
-    args.useGlobalAtomics = kernelImmData->getDescriptor().kernelAttributes.flags.useGlobalAtomics;
     args.areMultipleSubDevicesInContext = args.numAvailableDevices > 1;
     args.implicitScaling = device->isImplicitScalingCapable();
     args.isDebuggerActive = isDebuggerActive;
@@ -132,7 +132,7 @@ ze_result_t KernelImmutableData::initialize(NEO::KernelInfo *kernelInfo, Device
     patchWithImplicitSurface(crossThreadDataArrayRef, surfaceStateHeapArrayRef,
                              static_cast<uintptr_t>(globalConstBuffer->getGpuAddressToPatch()),
                              *globalConstBuffer, kernelDescriptor->payloadMappings.implicitArgs.globalConstantsSurfaceAddress,
-                             *neoDevice, kernelDescriptor->kernelAttributes.flags.useGlobalAtomics, deviceImp->isImplicitScalingCapable());
+                             *neoDevice, deviceImp->isImplicitScalingCapable());
     this->residencyContainer.push_back(globalConstBuffer);
 } else if (nullptr != globalConstBuffer) {
     this->residencyContainer.push_back(globalConstBuffer);
@@ -150,7 +150,7 @@ ze_result_t KernelImmutableData::initialize(NEO::KernelInfo *kernelInfo, Device

     patchImplicitArgBindlessOffsetAndSetSurfaceState(crossThreadDataArrayRef, surfaceStateHeapArrayRef,
                              globalConstBuffer, kernelDescriptor->payloadMappings.implicitArgs.globalConstantsSurfaceAddress,
-                             *neoDevice, kernelDescriptor->kernelAttributes.flags.useGlobalAtomics, deviceImp->isImplicitScalingCapable(), ssInHeap, kernelInfo->kernelDescriptor);
+                             *neoDevice, deviceImp->isImplicitScalingCapable(), ssInHeap, kernelInfo->kernelDescriptor);
 }

 if (NEO::isValidOffset(kernelDescriptor->payloadMappings.implicitArgs.globalVariablesSurfaceAddress.stateless)) {
@@ -159,7 +159,7 @@ ze_result_t KernelImmutableData::initialize(NEO::KernelInfo *kernelInfo, Device
     patchWithImplicitSurface(crossThreadDataArrayRef, surfaceStateHeapArrayRef,
                              static_cast<uintptr_t>(globalVarBuffer->getGpuAddressToPatch()),
                              *globalVarBuffer, kernelDescriptor->payloadMappings.implicitArgs.globalVariablesSurfaceAddress,
-                             *neoDevice, kernelDescriptor->kernelAttributes.flags.useGlobalAtomics, deviceImp->isImplicitScalingCapable());
+                             *neoDevice, deviceImp->isImplicitScalingCapable());
     this->residencyContainer.push_back(globalVarBuffer);
 } else if (nullptr != globalVarBuffer) {
     this->residencyContainer.push_back(globalVarBuffer);
@@ -177,7 +177,7 @@ ze_result_t KernelImmutableData::initialize(NEO::KernelInfo *kernelInfo, Device

     patchImplicitArgBindlessOffsetAndSetSurfaceState(crossThreadDataArrayRef, surfaceStateHeapArrayRef,
                              globalVarBuffer, kernelDescriptor->payloadMappings.implicitArgs.globalVariablesSurfaceAddress,
-                             *neoDevice, kernelDescriptor->kernelAttributes.flags.useGlobalAtomics, deviceImp->isImplicitScalingCapable(), ssInHeap, kernelInfo->kernelDescriptor);
+                             *neoDevice, deviceImp->isImplicitScalingCapable(), ssInHeap, kernelInfo->kernelDescriptor);
 }

 return ZE_RESULT_SUCCESS;
@@ -946,7 +946,6 @@ NEO::GraphicsAllocation *KernelImp::allocatePrivateMemoryGraphicsAllocation() {
 }

 void KernelImp::patchCrossthreadDataWithPrivateAllocation(NEO::GraphicsAllocation *privateAllocation) {
-    auto &kernelAttributes = kernelImmData->getDescriptor().kernelAttributes;
     auto device = module->getDevice();

     ArrayRef<uint8_t> crossThreadDataArrayRef = ArrayRef<uint8_t>(this->crossThreadData.get(), this->crossThreadDataSize);
@@ -955,7 +954,7 @@ void KernelImp::patchCrossthreadDataWithPrivateAllocatio
     patchWithImplicitSurface(crossThreadDataArrayRef, surfaceStateHeapArrayRef,
                              static_cast<uintptr_t>(privateAllocation->getGpuAddressToPatch()),
                              *privateAllocation, kernelImmData->getDescriptor().payloadMappings.implicitArgs.privateMemoryAddress,
-                             *device->getNEODevice(), kernelAttributes.flags.useGlobalAtomics, device->isImplicitScalingCapable());
+                             *device->getNEODevice(), device->isImplicitScalingCapable());
 }

 void KernelImp::setInlineSamplers() {
@@ -7,7 +7,7 @@

 inline void patchWithImplicitSurface(ArrayRef<uint8_t> crossThreadData, ArrayRef<uint8_t> surfaceStateHeap,
                                      uintptr_t ptrToPatchInCrossThreadData, NEO::GraphicsAllocation &allocation,
-                                     const NEO::ArgDescPointer &ptr, const NEO::Device &device, bool useGlobalAtomics,
+                                     const NEO::ArgDescPointer &ptr, const NEO::Device &device,
                                      bool implicitScaling) {
     if (false == crossThreadData.empty()) {
         NEO::patchPointer(crossThreadData, ptr, ptrToPatchInCrossThreadData);
@@ -26,7 +26,6 @@ inline void patchWithImplicitSurface(ArrayRef<uint8_t> crossThreadData, ArrayRef
     args.graphicsAddress = addressToPatch;
     args.gmmHelper = device.getGmmHelper();
     args.allocation = &allocation;
-    args.useGlobalAtomics = useGlobalAtomics;
     args.numAvailableDevices = device.getNumGenericSubDevices();
     args.areMultipleSubDevicesInContext = args.numAvailableDevices > 1;
     args.mocs = gfxCoreHelper.getMocsIndex(*args.gmmHelper, true, false) << 1;
@@ -38,7 +37,7 @@ inline void patchWithImplicitSurface(ArrayRef<uint8_t> crossThreadData, ArrayRef
 }

 inline void patchImplicitArgBindlessOffsetAndSetSurfaceState(ArrayRef<uint8_t> crossThreadData, ArrayRef<uint8_t> surfaceStateHeap, NEO::GraphicsAllocation *allocation,
-                                                             const NEO::ArgDescPointer &ptr, const NEO::Device &device, bool useGlobalAtomics, bool implicitScaling,
+                                                             const NEO::ArgDescPointer &ptr, const NEO::Device &device, bool implicitScaling,
                                                              const NEO::SurfaceStateInHeapInfo &ssInHeap, const NEO::KernelDescriptor &kernelDescriptor) {
     auto &gfxCoreHelper = device.getGfxCoreHelper();
     void *surfaceStateAddress = nullptr;
@@ -77,7 +76,6 @@ inline void patchImplicitArgBindlessOffsetAndSetSurfaceState(ArrayRef<uint8_t> c
     args.numAvailableDevices = device.getNumGenericSubDevices();
     args.allocation = allocation;
     args.gmmHelper = device.getGmmHelper();
-    args.useGlobalAtomics = useGlobalAtomics;
     args.areMultipleSubDevicesInContext = args.numAvailableDevices > 1;
     args.implicitScaling = implicitScaling;
     args.isDebuggerActive = isDebuggerActive;
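After these hunks the shared patch helpers take one boolean fewer. Below is a minimal, self-contained sketch of what a helper like patchWithImplicitSurface does after the change, under simplified stand-in types: it patches the address into cross-thread data when a stateless offset exists, and encodes a surface state only when a heap and a bindful offset are available (the early-out matches the behaviour the tests further down rely on, but the exact conditions in the real helper are an assumption here).

#include <cstddef>
#include <cstdint>
#include <cstring>

// Simplified stand-ins for ArrayRef, the pointer descriptor and the encode args.
struct ByteSpan {
    uint8_t *data = nullptr;
    std::size_t size = 0;
    bool empty() const { return size == 0; }
};
struct PointerArg {
    std::size_t statelessOffset = ~std::size_t(0); // sentinel: not patched stateless
    std::size_t bindfulOffset = ~std::size_t(0);   // sentinel: no surface-state slot
};
struct BufferArgs {
    uintptr_t graphicsAddress = 0;
    uint32_t numAvailableDevices = 1;
    bool areMultipleSubDevicesInContext = false;
    bool implicitScaling = false;
    bool isDebuggerActive = false;
    // useGlobalAtomics no longer exists here
};

// Stubbed encoder; the real one writes a hardware surface state.
inline void encodeBufferState(const BufferArgs &, void * /*surfaceState*/) {}

// Sketch of the trimmed helper: same flow as the diff, one parameter fewer.
inline void patchWithImplicitSurfaceSketch(ByteSpan crossThreadData, ByteSpan surfaceStateHeap,
                                           uintptr_t ptrToPatch, const PointerArg &ptr,
                                           uint32_t numSubDevices, bool implicitScaling, bool debuggerActive) {
    if (!crossThreadData.empty() && ptr.statelessOffset != ~std::size_t(0)) {
        std::memcpy(crossThreadData.data + ptr.statelessOffset, &ptrToPatch, sizeof(ptrToPatch));
    }
    if (surfaceStateHeap.empty() || ptr.bindfulOffset == ~std::size_t(0)) {
        return; // nothing to encode
    }
    BufferArgs args{};
    args.graphicsAddress = ptrToPatch;
    args.numAvailableDevices = numSubDevices;
    args.areMultipleSubDevicesInContext = numSubDevices > 1;
    args.implicitScaling = implicitScaling;
    args.isDebuggerActive = debuggerActive;
    encodeBufferState(args, surfaceStateHeap.data + ptr.bindfulOffset);
}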
@@ -205,7 +205,6 @@ HWTEST2_F(CommandListAppendLaunchKernel, givenNotEnoughSpaceInCommandStreamWhenA
     false, // isPredicate
     false, // isTimestampEvent
     false, // requiresUncachedMocs
-    false, // useGlobalAtomics
     false, // isInternal
     false, // isCooperative
     false, // isHostScopeSignalEvent
@@ -702,7 +702,6 @@ HWTEST2_F(CommandListAppendLaunchKernel, givenNotEnoughSpaceInCommandStreamWhenA
     false, // isPredicate
     false, // isTimestampEvent
     false, // requiresUncachedMocs
-    false, // useGlobalAtomics
     false, // isInternal
     false, // isCooperative
     false, // isHostScopeSignalEvent
@@ -407,7 +407,7 @@ HWTEST_F(KernelImpTest, givenSurfaceStateHeapWhenPatchWithImplicitSurfaceCalledT
     patchWithImplicitSurface(crossThreadDataArrayRef, surfaceStateHeapArrayRef,
                              ptrToPatchInCrossThreadData,
                              globalBuffer, ptr,
-                             *neoDevice, false, false);
+                             *neoDevice, false);
     EXPECT_EQ(encodeBufferSurfaceStateCalled, 0u);
 }
 {
@@ -415,7 +415,7 @@ HWTEST_F(KernelImpTest, givenSurfaceStateHeapWhenPatchWithImplicitSurfaceCalledT
     patchWithImplicitSurface(crossThreadDataArrayRef, surfaceStateHeapArrayRef,
                              ptrToPatchInCrossThreadData,
                              globalBuffer, ptr,
-                             *neoDevice, false, false);
+                             *neoDevice, false);
     ASSERT_EQ(encodeBufferSurfaceStateCalled, 1u);
     EXPECT_FALSE(savedSurfaceStateArgs.isDebuggerActive);
 }
@@ -424,7 +424,7 @@ HWTEST_F(KernelImpTest, givenSurfaceStateHeapWhenPatchWithImplicitSurfaceCalledT
     patchWithImplicitSurface(crossThreadDataArrayRef, surfaceStateHeapArrayRef,
                              ptrToPatchInCrossThreadData,
                              globalBuffer, ptr,
-                             *neoDevice, false, false);
+                             *neoDevice, false);
     ASSERT_EQ(encodeBufferSurfaceStateCalled, 2u);
     EXPECT_TRUE(savedSurfaceStateArgs.isDebuggerActive);
 }
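The three test blocks above pass identical arguments to the trimmed call and differ only in fixture state: whether anything gets encoded, and what the mocked encoder records in savedSurfaceStateArgs.isDebuggerActive. The gtest-style sketch below mirrors that assertion pattern with a hypothetical fake in place of the real fixture; it folds the fixture state into explicit parameters purely to stay self-contained.

#include <gtest/gtest.h>
#include <cstdint>
#include <vector>

// Hypothetical stand-in for the fixture's mocked helper: it skips encoding when no
// surface-state heap is supplied and otherwise records the call and the debugger flag.
struct FakePatcher {
    unsigned encodeBufferSurfaceStateCalled = 0;
    bool savedIsDebuggerActive = false;

    void patchWithImplicitSurface(const std::vector<uint8_t> &surfaceStateHeap, bool isDebuggerActive) {
        if (surfaceStateHeap.empty()) {
            return; // nothing to encode
        }
        ++encodeBufferSurfaceStateCalled;
        savedIsDebuggerActive = isDebuggerActive;
    }
};

TEST(PatchWithImplicitSurfaceSketch, encodesOnlyWhenHeapPresentAndRecordsDebuggerFlag) {
    FakePatcher patcher;
    std::vector<uint8_t> emptyHeap;
    std::vector<uint8_t> heap(64, 0);

    patcher.patchWithImplicitSurface(emptyHeap, false);
    EXPECT_EQ(patcher.encodeBufferSurfaceStateCalled, 0u); // mirrors the first case above

    patcher.patchWithImplicitSurface(heap, false);
    ASSERT_EQ(patcher.encodeBufferSurfaceStateCalled, 1u); // second case
    EXPECT_FALSE(patcher.savedIsDebuggerActive);

    patcher.patchWithImplicitSurface(heap, true);
    ASSERT_EQ(patcher.encodeBufferSurfaceStateCalled, 2u); // third case
    EXPECT_TRUE(patcher.savedIsDebuggerActive);
}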
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2022-2023 Intel Corporation
+ * Copyright (C) 2022-2024 Intel Corporation
  *
  * SPDX-License-Identifier: MIT
  *
@@ -73,7 +73,7 @@ HWTEST2_F(KernelDebugSurfaceDG2Test, givenDebuggerWhenPatchWithImplicitSurfaceCa
     patchWithImplicitSurface(ArrayRef<uint8_t>(), surfaceStateHeapRef,
                              0,
                              *device->getDebugSurface(), kernel.immutableData.kernelDescriptor->payloadMappings.implicitArgs.systemThreadSurfaceAddress,
-                             *device->getNEODevice(), kernel.immutableData.kernelDescriptor->kernelAttributes.flags.useGlobalAtomics, device->isImplicitScalingCapable());
+                             *device->getNEODevice(), device->isImplicitScalingCapable());

     auto debugSurfaceState = reinterpret_cast<RENDER_SURFACE_STATE *>(kernel.surfaceStateHeapData.get());
     debugSurfaceState = ptrOffset(debugSurfaceState, sizeof(RENDER_SURFACE_STATE));
@@ -133,7 +133,7 @@ HWTEST2_F(KernelDebugSurfaceDG2Test, givenNoDebuggerWhenPatchWithImplicitSurface
     patchWithImplicitSurface(ArrayRef<uint8_t>(), surfaceStateHeapRef,
                              0,
                              *device->getDebugSurface(), kernel.immutableData.kernelDescriptor->payloadMappings.implicitArgs.systemThreadSurfaceAddress,
-                             *device->getNEODevice(), kernel.immutableData.kernelDescriptor->kernelAttributes.flags.useGlobalAtomics, device->isImplicitScalingCapable());
+                             *device->getNEODevice(), device->isImplicitScalingCapable());

     auto debugSurfaceState = reinterpret_cast<RENDER_SURFACE_STATE *>(kernel.surfaceStateHeapData.get());
     debugSurfaceState = ptrOffset(debugSurfaceState, sizeof(RENDER_SURFACE_STATE));
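Both debug-surface tests then read the encoded state back out of the surface-state heap, stepping one surface-state entry into it via ptrOffset(..., sizeof(RENDER_SURFACE_STATE)). A small sketch of that offset arithmetic, with a hypothetical ptrOffset helper and a placeholder for the family-generated RENDER_SURFACE_STATE type:

#include <cstddef>
#include <cstdint>

// Hypothetical helper mirroring the ptrOffset utility used in the tests above.
template <typename T>
T *ptrOffset(T *base, std::size_t offsetInBytes) {
    return reinterpret_cast<T *>(reinterpret_cast<uint8_t *>(base) + offsetInBytes);
}

// Placeholder for the hardware surface-state layout; the real RENDER_SURFACE_STATE
// is generated per GFX family and is considerably larger.
struct RenderSurfaceStatePlaceholder {
    uint8_t raw[64];
};

RenderSurfaceStatePlaceholder *debugSurfaceStateAt(void *surfaceStateHeap) {
    auto *state = reinterpret_cast<RenderSurfaceStatePlaceholder *>(surfaceStateHeap);
    // Skip one entry, matching the ptrOffset(..., sizeof(RENDER_SURFACE_STATE)) in the hunks above.
    return ptrOffset(state, sizeof(RenderSurfaceStatePlaceholder));
}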