fix(ocl): handle gl sharing displayable textures

Displayable textures always need a DC flush.

Related-To: NEO-11694

Signed-off-by: Dominik Dabek <dominik.dabek@intel.com>
This commit is contained in:
Dominik Dabek 2024-06-27 11:34:50 +00:00 committed by Compute-Runtime-Automation
parent d1e56b4e1e
commit 76e8be5c39
16 changed files with 76 additions and 7 deletions

View File

@ -622,12 +622,14 @@ cl_int CommandQueue::enqueueReleaseSharedObjects(cl_uint numObjects, const cl_me
}
bool isImageReleased = false;
bool isDisplayableReleased = false;
for (unsigned int object = 0; object < numObjects; object++) {
auto memObject = castToObject<MemObj>(memObjects[object]);
if (memObject == nullptr || memObject->peekSharingHandler() == nullptr) {
return CL_INVALID_MEM_OBJECT;
}
isImageReleased |= memObject->getMultiGraphicsAllocation().getAllocationType() == AllocationType::sharedImage;
isDisplayableReleased |= memObject->isMemObjDisplayable();
memObject->peekSharingHandler()->release(memObject, getDevice().getRootDeviceIndex());
DEBUG_BREAK_IF(memObject->acquireCount <= 0);
@ -635,7 +637,7 @@ cl_int CommandQueue::enqueueReleaseSharedObjects(cl_uint numObjects, const cl_me
}
if (this->getGpgpuCommandStreamReceiver().isDirectSubmissionEnabled()) {
if (this->getDevice().getProductHelper().isDcFlushMitigated()) {
if (this->getDevice().getProductHelper().isDcFlushMitigated() || isDisplayableReleased) {
this->getGpgpuCommandStreamReceiver().registerDcFlushForDcMitigation();
this->getGpgpuCommandStreamReceiver().sendRenderStateCacheFlush();
} else if (isImageReleased) {

View File

@ -405,6 +405,7 @@ Image *Image::createSharedImage(Context *context, SharingHandler *sharingHandler
sharedImage->setSurfaceOffsets(imgInfo.offset, imgInfo.xOffset, imgInfo.yOffset, imgInfo.yOffsetForUVPlane);
sharedImage->setMcsSurfaceInfo(mcsSurfaceInfo);
sharedImage->setPlane(imgInfo.plane);
sharedImage->setIsDisplayable(imgInfo.isDisplayable);
return sharedImage;
}

View File

@ -171,6 +171,8 @@ class Image : public MemObj {
surfaceOffsets.yOffsetForUVplane = yOffsetForUVPlane;
}
// Copies the cached surface offsets (base, x/y, UV-plane y) into the out-parameter.
void getSurfaceOffsets(SurfaceOffsets &surfaceOffsetsOut) { surfaceOffsetsOut = this->surfaceOffsets; }
// True when this image is backed by a displayable (scan-out capable) surface;
// set from ImageInfo at shared-image creation time.
bool getIsDisplayable() const { return isDisplayable; }
void setIsDisplayable(bool displayable) { this->isDisplayable = displayable; }
// Cube-map face index accessors; index semantics follow the surface state layout.
void setCubeFaceIndex(uint32_t index) { cubeFaceIndex = index; }
uint32_t getCubeFaceIndex() { return cubeFaceIndex; }

View File

@ -275,6 +275,10 @@ bool MemObj::isMemObjUncacheableForSurfaceState() const {
return isAnyBitSet(flagsIntel, CL_MEM_LOCALLY_UNCACHED_SURFACE_STATE_RESOURCE | CL_MEM_LOCALLY_UNCACHED_RESOURCE);
}
bool MemObj::isMemObjDisplayable() const {
return this->isDisplayable;
}
GraphicsAllocation *MemObj::getGraphicsAllocation(uint32_t rootDeviceIndex) const {
    // Delegate to the multi-device container holding one allocation per root device.
    auto *allocation = multiGraphicsAllocation.getGraphicsAllocation(rootDeviceIndex);
    return allocation;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (C) 2018-2023 Intel Corporation
* Copyright (C) 2018-2024 Intel Corporation
*
* SPDX-License-Identifier: MIT
*
@ -70,7 +70,7 @@ class MemObj : public BaseObject<_cl_mem> {
MultiGraphicsAllocation &&multiGraphicsAllocation,
bool zeroCopy,
bool isHostPtrSVM,
bool isObjectRedescrbied);
bool isObjectRedescribed);
~MemObj() override;
cl_int getMemObjectInfo(cl_mem_info paramName,
@ -103,6 +103,7 @@ class MemObj : public BaseObject<_cl_mem> {
bool isMemObjWithHostPtrSVM() const;
bool isMemObjUncacheable() const;
bool isMemObjUncacheableForSurfaceState() const;
bool isMemObjDisplayable() const;
virtual void transferDataToHostPtr(MemObjSizeArray &copySize, MemObjOffsetArray &copyOffset) { UNRECOVERABLE_IF(true); };
virtual void transferDataFromHostPtr(MemObjSizeArray &copySize, MemObjOffsetArray &copyOffset) { UNRECOVERABLE_IF(true); };
@ -190,6 +191,7 @@ class MemObj : public BaseObject<_cl_mem> {
bool isZeroCopy;
bool isHostPtrSVM;
bool isObjectRedescribed;
bool isDisplayable{false};
MemoryManager *memoryManager = nullptr;
MultiGraphicsAllocation multiGraphicsAllocation;
GraphicsAllocation *mcsAllocation = nullptr;

View File

@ -198,6 +198,7 @@ Image *GlTexture::createSharedGlTexture(Context *context, cl_mem_flags flags, cl
imgInfo.surfaceFormat = &surfaceFormatInfo.surfaceFormat;
imgInfo.qPitch = qPitch;
imgInfo.isDisplayable = gmm->gmmResourceInfo->isDisplayable();
auto glTexture = new GlTexture(sharingFunctions, getClGlObjectType(target), texture, texInfo, target, std::max(miplevel, 0));

View File

@ -148,6 +148,7 @@ Image *GlTexture::createSharedGlTexture(Context *context, cl_mem_flags flags, cl
imgInfo.imgDesc = Image::convertDescriptor(imgDesc);
imgInfo.surfaceFormat = &surfaceFormatInfo.surfaceFormat;
imgInfo.qPitch = qPitch;
imgInfo.isDisplayable = gmm->gmmResourceInfo->isDisplayable();
auto glTexture = new GlTexture(sharingFunctions, getClGlObjectType(target), texture, texInfo, target, std::max(miplevel, 0));

View File

@ -1371,6 +1371,35 @@ HWTEST_F(CommandQueueTests, givenDirectSubmissionAndSharedImageWhenReleasingShar
EXPECT_EQ(ultCsr->renderStateCacheDcFlushForced, context.getDevice(0)->getProductHelper().isDcFlushMitigated());
}
// Verifies that releasing a shared *displayable* image under direct submission
// both flushes the render state cache and forces a DC flush, regardless of the
// DC-flush-mitigation product setting.
HWTEST_F(CommandQueueTests, givenDirectSubmissionAndSharedDisplayableImageWhenReleasingSharedObjectThenFlushRenderStateCacheAndForceDcFlush) {
MockContext context;
MockCommandQueue cmdQ(&context, context.getDevice(0), 0, false);
// Ownership of the sharing handler is transferred to the image below.
MockSharingHandler *mockSharingHandler = new MockSharingHandler;
auto image = std::unique_ptr<Image>(ImageHelper<Image2dDefaults>::create(&context));
image->setSharingHandler(mockSharingHandler);
image->getGraphicsAllocation(0u)->setAllocationType(AllocationType::sharedImage);
cl_mem memObject = image.get();
cl_uint numObjects = 1;
cl_mem *memObjects = &memObject;
// Acquire first so the release path below is legal.
cl_int result = cmdQ.enqueueAcquireSharedObjects(numObjects, memObjects, 0, nullptr, nullptr, 0);
EXPECT_EQ(result, CL_SUCCESS);
// Mark the image displayable *after* acquire; the release path must pick this up.
image->setIsDisplayable(true);
auto ultCsr = static_cast<UltCommandStreamReceiver<FamilyType> *>(&cmdQ.getGpgpuCommandStreamReceiver());
// Enable the direct-submission branch and stub out the actual flush submission.
ultCsr->directSubmissionAvailable = true;
ultCsr->callBaseSendRenderStateCacheFlush = false;
ultCsr->flushReturnValue = SubmissionStatus::success;
EXPECT_FALSE(ultCsr->renderStateCacheFlushed);
result = cmdQ.enqueueReleaseSharedObjects(numObjects, memObjects, 0, nullptr, nullptr, 0);
EXPECT_EQ(result, CL_SUCCESS);
// Displayable release must flush the render state cache AND force the DC flush.
EXPECT_TRUE(ultCsr->renderStateCacheFlushed);
EXPECT_TRUE(ultCsr->renderStateCacheDcFlushForced);
}
HWTEST_F(CommandQueueTests, givenDcFlushMitigationAndDirectSubmissionAndBufferWhenReleasingSharedObjectThenFlushRenderStateCacheAndForceDcFlush) {
DebugManagerStateRestore restorer;
debugManager.flags.AllowDcFlush.set(0);

View File

@ -779,6 +779,7 @@ TEST_P(CreateImageHostPtr, WhenGettingImageDescThenCorrectValuesAreReturned) {
EXPECT_EQ(image->getHostPtrSlicePitch(), static_cast<size_t>(imageDesc.image_width * elementSize * imageDesc.image_height) * isArrayOr3DType);
EXPECT_EQ(image->getImageCount(), 1u);
EXPECT_NE(0u, image->getSize());
EXPECT_FALSE(image->getIsDisplayable());
EXPECT_NE(nullptr, allocation);
}

View File

@ -566,6 +566,14 @@ TEST_F(GlSharingTextureTests, givenAuxDisabledAndUnifiedAuxCapableWhenGlTextureI
EXPECT_FALSE(graphicsAllocation->getDefaultGmm()->isCompressionEnabled());
}
// Verifies that the displayable flag reported by GMM resource info propagates
// onto the Image created from a shared GL texture.
TEST_F(GlSharingTextureTests, givenGmmInfoIsDisplayableWhenGlTextureIsCreatedThenImageHasDisplayableSet) {
    // Force the mocked GMM resource info to report a displayable (scan-out) surface.
    auto mockGmmResInfo = static_cast<MockGmmResourceInfo *>(tempMM->forceGmm->gmmResourceInfo.get());
    mockGmmResInfo->isDisplayableValue = true;
    cl_int retVal = CL_SUCCESS;
    auto glTexture = std::unique_ptr<Image>(GlTexture::createSharedGlTexture(clContext.get(), CL_MEM_WRITE_ONLY, GL_SRGB8_ALPHA8, 0, textureId, &retVal));
    // Fail (not crash) if creation did not succeed: the original test dereferenced
    // the pointer without checking it, so a null return was a segfault.
    EXPECT_EQ(CL_SUCCESS, retVal);
    ASSERT_NE(nullptr, glTexture);
    EXPECT_TRUE(glTexture->getIsDisplayable());
}
class GetGlTextureInfoTests : public GlSharingTextureTests,
public ::testing::WithParamInterface<unsigned int /*cl_GLenum*/> {
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (C) 2018-2023 Intel Corporation
* Copyright (C) 2018-2024 Intel Corporation
*
* SPDX-License-Identifier: MIT
*
@ -78,6 +78,8 @@ class GmmResourceInfo : NonCopyableOrMovableClass {
// True when the resource can live in 64KB pages (queried from GMM).
MOCKABLE_VIRTUAL bool is64KBPageSuitable() const { return resourceInfo->Is64KBPageSuitable(); }
// True when the resource is a displayable (scan-out) surface; platform-specific
// implementations live in per-OS translation units.
MOCKABLE_VIRTUAL bool isDisplayable() const;
// Raw access to the underlying GMM resource info handle (non-owning).
MOCKABLE_VIRTUAL GMM_RESOURCE_INFO *peekGmmResourceInfo() const { return resourceInfo.get(); }
MOCKABLE_VIRTUAL GMM_RESOURCE_USAGE_TYPE getCachePolicyUsage() const { return resourceInfo->GetCachePolicyUsage(); }

View File

@ -1,5 +1,5 @@
/*
* Copyright (C) 2021 Intel Corporation
* Copyright (C) 2021-2024 Intel Corporation
*
* SPDX-License-Identifier: MIT
*
@ -12,4 +12,8 @@ namespace NEO {
// Stub: this build variant exposes no driver protection bits.
uint64_t GmmResourceInfo::getDriverProtectionBits() {
return 0u;
}
// Stub: displayable (scan-out) surfaces are not supported in this build
// variant — NOTE(review): target platform inferred from sibling stubs; confirm.
bool GmmResourceInfo::isDisplayable() const {
return false;
}
} // namespace NEO

View File

@ -1,5 +1,5 @@
/*
* Copyright (C) 2021 Intel Corporation
* Copyright (C) 2021-2024 Intel Corporation
*
* SPDX-License-Identifier: MIT
*
@ -12,4 +12,8 @@ namespace NEO {
// Stub: this build variant exposes no driver protection bits.
uint64_t GmmResourceInfo::getDriverProtectionBits() {
return 0u;
}
// Stub: displayable (scan-out) surfaces are not supported in this build
// variant — NOTE(review): target platform inferred from sibling stubs; confirm.
bool GmmResourceInfo::isDisplayable() const {
return false;
}
} // namespace NEO

View File

@ -1,5 +1,5 @@
/*
* Copyright (C) 2021 Intel Corporation
* Copyright (C) 2021-2024 Intel Corporation
*
* SPDX-License-Identifier: MIT
*
@ -12,4 +12,8 @@ namespace NEO {
// Stub: this build variant exposes no driver protection bits.
uint64_t GmmResourceInfo::getDriverProtectionBits() {
return 0u;
}
// Stub: displayable (scan-out) surfaces are not supported in this build
// variant — NOTE(review): target platform inferred from sibling stubs; confirm.
bool GmmResourceInfo::isDisplayable() const {
return false;
}
} // namespace NEO

View File

@ -246,6 +246,7 @@ struct ImageInfo {
uint32_t mipCount;
bool linearStorage;
bool useLocalMemory;
bool isDisplayable;
};
struct ImageImplicitArgs {

View File

@ -83,6 +83,8 @@ class MockGmmResourceInfo : public GmmResourceInfo {
// Test knobs: each override returns a field the test can set directly.
bool is64KBPageSuitable() const override { return is64KBPageSuitableValue; }
bool isDisplayable() const override { return isDisplayableValue; }
GMM_RESOURCE_INFO *peekGmmResourceInfo() const override { return mockResourceInfoHandle; }
GMM_RESOURCE_USAGE_TYPE getCachePolicyUsage() const override { return usageType; }
@ -118,6 +120,7 @@ class MockGmmResourceInfo : public GmmResourceInfo {
uint32_t arrayIndexPassedToGetOffset = 0;
SurfaceFormatInfo tempSurface{};
bool is64KBPageSuitableValue = true;
bool isDisplayableValue = false;
GMM_RES_COPY_BLT requestedResCopyBlt = {};
uint32_t cpuBltCalled = 0u;
uint8_t cpuBltResult = 1u;