Add mechanism to migrate multi root device memory

Invalidate the TLB cache when a kernel requires memory migration

Related-To: NEO-3691

Signed-off-by: Mateusz Jablonski <mateusz.jablonski@intel.com>
Author: Mateusz Jablonski
Date: 2021-07-01 16:00:22 +00:00
Committed by: Compute-Runtime-Automation
Parent: 714a1ebf53
Commit: 6f3c89decb
37 changed files with 1544 additions and 173 deletions

@@ -221,6 +221,7 @@ CompletionStamp CommandStreamReceiverHw<GfxFamily>::flushTask(
     if (updateTag) {
         PipeControlArgs args(dispatchFlags.dcFlush);
         args.notifyEnable = isUsedNotifyEnableForPostSync();
+        args.tlbInvalidation |= dispatchFlags.memoryMigrationRequired;
         MemorySynchronizationCommands<GfxFamily>::addPipeControlAndProgramPostSyncOperation(
             commandStreamTask,
             PIPE_CONTROL::POST_SYNC_OPERATION_WRITE_IMMEDIATE_DATA,

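For context, here is a minimal stand-alone sketch of what the added line achieves: when a dispatch carries memoryMigrationRequired, the tag-update PIPE_CONTROL also requests a TLB invalidation, so address translations that went stale while pages were migrated between root devices are dropped. The stub types below are illustrative stand-ins, not the real NEO definitions.

// Illustrative stand-ins only; the real PipeControlArgs and DispatchFlags live
// in the NEO sources and carry many more fields than shown here.
struct PipeControlArgsStub {
    bool dcFlush = false;
    bool notifyEnable = false;
    bool tlbInvalidation = false;
    explicit PipeControlArgsStub(bool dcFlushArg) : dcFlush(dcFlushArg) {}
};

struct DispatchFlagsStub {
    bool dcFlush = false;
    bool memoryMigrationRequired = false;
};

// Mirrors the flushTask change above: the migration flag is OR-ed into the
// TLB-invalidation request of the tag-update pipe control.
PipeControlArgsStub makeTagUpdatePipeControlArgs(const DispatchFlagsStub &dispatchFlags) {
    PipeControlArgsStub args(dispatchFlags.dcFlush);
    args.tlbInvalidation |= dispatchFlags.memoryMigrationRequired;
    return args;
}

Call sites that never set memoryMigrationRequired are unaffected, since the field defaults to false (see the DispatchFlags hunk below).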
@@ -56,33 +56,34 @@ struct DispatchFlags {
         KernelExecutionType kernelExecutionTypeP, MemoryCompressionState memoryCompressionStateP,
         uint64_t sliceCountP, bool blockingP, bool dcFlushP, bool useSLMP, bool guardCommandBufferWithPipeControlP, bool gsba32BitRequiredP,
         bool requiresCoherencyP, bool lowPriorityP, bool implicitFlushP, bool outOfOrderExecutionAllowedP, bool epilogueRequiredP,
-        bool usePerDSSbackedBufferP, bool useSingleSubdeviceP, bool useGlobalAtomicsP, size_t areMultipleSubDevicesInContextP) : csrDependencies(csrDependenciesP),
-            barrierTimestampPacketNodes(barrierTimestampPacketNodesP),
-            pipelineSelectArgs(pipelineSelectArgsP),
-            flushStampReference(flushStampReferenceP),
-            throttle(throttleP),
-            preemptionMode(preemptionModeP),
-            numGrfRequired(numGrfRequiredP),
-            l3CacheSettings(l3CacheSettingsP),
-            threadArbitrationPolicy(threadArbitrationPolicyP),
-            additionalKernelExecInfo(additionalKernelExecInfoP),
-            kernelExecutionType(kernelExecutionTypeP),
-            memoryCompressionState(memoryCompressionStateP),
-            sliceCount(sliceCountP),
-            blocking(blockingP),
-            dcFlush(dcFlushP),
-            useSLM(useSLMP),
-            guardCommandBufferWithPipeControl(guardCommandBufferWithPipeControlP),
-            gsba32BitRequired(gsba32BitRequiredP),
-            requiresCoherency(requiresCoherencyP),
-            lowPriority(lowPriorityP),
-            implicitFlush(implicitFlushP),
-            outOfOrderExecutionAllowed(outOfOrderExecutionAllowedP),
-            epilogueRequired(epilogueRequiredP),
-            usePerDssBackedBuffer(usePerDSSbackedBufferP),
-            useSingleSubdevice(useSingleSubdeviceP),
-            useGlobalAtomics(useGlobalAtomicsP),
-            areMultipleSubDevicesInContext(areMultipleSubDevicesInContextP){};
+        bool usePerDSSbackedBufferP, bool useSingleSubdeviceP, bool useGlobalAtomicsP, bool areMultipleSubDevicesInContextP, bool memoryMigrationRequiredP) : csrDependencies(csrDependenciesP),
+            barrierTimestampPacketNodes(barrierTimestampPacketNodesP),
+            pipelineSelectArgs(pipelineSelectArgsP),
+            flushStampReference(flushStampReferenceP),
+            throttle(throttleP),
+            preemptionMode(preemptionModeP),
+            numGrfRequired(numGrfRequiredP),
+            l3CacheSettings(l3CacheSettingsP),
+            threadArbitrationPolicy(threadArbitrationPolicyP),
+            additionalKernelExecInfo(additionalKernelExecInfoP),
+            kernelExecutionType(kernelExecutionTypeP),
+            memoryCompressionState(memoryCompressionStateP),
+            sliceCount(sliceCountP),
+            blocking(blockingP),
+            dcFlush(dcFlushP),
+            useSLM(useSLMP),
+            guardCommandBufferWithPipeControl(guardCommandBufferWithPipeControlP),
+            gsba32BitRequired(gsba32BitRequiredP),
+            requiresCoherency(requiresCoherencyP),
+            lowPriority(lowPriorityP),
+            implicitFlush(implicitFlushP),
+            outOfOrderExecutionAllowed(outOfOrderExecutionAllowedP),
+            epilogueRequired(epilogueRequiredP),
+            usePerDssBackedBuffer(usePerDSSbackedBufferP),
+            useSingleSubdevice(useSingleSubdeviceP),
+            useGlobalAtomics(useGlobalAtomicsP),
+            areMultipleSubDevicesInContext(areMultipleSubDevicesInContextP),
+            memoryMigrationRequired(memoryMigrationRequiredP){};

     CsrDependencies csrDependencies;
     TimestampPacketContainer *barrierTimestampPacketNodes = nullptr;
@@ -112,6 +113,7 @@ struct DispatchFlags {
     bool useSingleSubdevice = false;
     bool useGlobalAtomics = false;
     bool areMultipleSubDevicesInContext = false;
+    bool memoryMigrationRequired = false;
 };

 struct CsrSizeRequestFlags {
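
Below is a hypothetical caller-side sketch of how the new flag could be populated before flushTask() is invoked; markMigrationIfNeeded and usesMultiRootDeviceAllocations are placeholders for illustration, not APIs introduced by this commit.

// Hypothetical enqueue-side helper (not part of this diff). DispatchFlagsStub
// again stands in for the real DispatchFlags; only the new field is shown.
struct DispatchFlagsStub {
    bool memoryMigrationRequired = false; // new field, defaults to false
};

// When the allocations used by a kernel span more than one root device, the
// dispatch is flagged so the subsequent tag-update pipe control in flushTask()
// also invalidates the TLB after the pages have been migrated.
void markMigrationIfNeeded(DispatchFlagsStub &dispatchFlags, bool usesMultiRootDeviceAllocations) {
    dispatchFlags.memoryMigrationRequired = usesMultiRootDeviceAllocations;
}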