Add TGL_LP support

Change-Id: I522eeb8ea285eb900890ecf454ee12ed30f867f2
johnbasha shaik 2019-08-29 12:01:18 +05:30 committed by gbsbuild
parent 19ab520741
commit b136069616
46 changed files with 7101 additions and 724 deletions

View File

@@ -24,11 +24,11 @@ cmake_minimum_required(VERSION 3.5)
project(igfx_gmmumd)
# GmmLib API version used for .so naming
set(GMMLIB_API_MAJOR_VERSION 9)
set(GMMLIB_API_MAJOR_VERSION 10)
set(GMMLIB_API_MINOR_VERSION 0)
if(NOT DEFINED MAJOR_VERSION)
set(MAJOR_VERSION 9)
set(MAJOR_VERSION 10)
endif()
if(NOT DEFINED MINOR_VERSION)
@@ -176,10 +176,12 @@ set(HEADERS_
${BS_DIR_GMMLIB}/CachePolicy/GmmCachePolicyUndefineConditionals.h
${BS_DIR_GMMLIB}/CachePolicy/GmmGen10CachePolicy.h
${BS_DIR_GMMLIB}/CachePolicy/GmmGen11CachePolicy.h
${BS_DIR_GMMLIB}/CachePolicy/GmmGen12CachePolicy.h
${BS_DIR_GMMLIB}/CachePolicy/GmmGen8CachePolicy.h
${BS_DIR_GMMLIB}/CachePolicy/GmmGen9CachePolicy.h
${BS_DIR_GMMLIB}/inc/External/Common/CachePolicy/GmmCachePolicyGen10.h
${BS_DIR_GMMLIB}/inc/External/Common/CachePolicy/GmmCachePolicyGen11.h
${BS_DIR_GMMLIB}/inc/External/Common/CachePolicy/GmmCachePolicyGen12.h
${BS_DIR_GMMLIB}/inc/External/Common/CachePolicy/GmmCachePolicyGen8.h
${BS_DIR_GMMLIB}/inc/External/Common/CachePolicy/GmmCachePolicyGen9.h
${BS_DIR_GMMLIB}/inc/External/Common/GmmCachePolicy.h
@@ -205,10 +207,12 @@ set(HEADERS_
${BS_DIR_GMMLIB}/inc/External/Linux/GmmResourceInfoLin.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Platform/GmmGen10Platform.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Platform/GmmGen11Platform.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Platform/GmmGen12Platform.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Platform/GmmGen8Platform.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Platform/GmmGen9Platform.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Texture/GmmGen10TextureCalc.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Texture/GmmGen11TextureCalc.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Texture/GmmGen12TextureCalc.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Texture/GmmGen7TextureCalc.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Texture/GmmGen8TextureCalc.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Texture/GmmGen9TextureCalc.h
@@ -235,7 +239,9 @@ set(SOURCES_
${BS_DIR_GMMLIB}/CachePolicy/GmmGen9CachePolicy.cpp
${BS_DIR_GMMLIB}/CachePolicy/GmmGen10CachePolicy.cpp
${BS_DIR_GMMLIB}/CachePolicy/GmmGen11CachePolicy.cpp
${BS_DIR_GMMLIB}/CachePolicy/GmmGen12CachePolicy.cpp
${BS_DIR_GMMLIB}/Platform/GmmGen11Platform.cpp
${BS_DIR_GMMLIB}/Platform/GmmGen12Platform.cpp
${BS_DIR_GMMLIB}/Platform/GmmGen8Platform.cpp
${BS_DIR_GMMLIB}/Platform/GmmGen9Platform.cpp
${BS_DIR_GMMLIB}/Platform/GmmGen10Platform.cpp
@@ -249,6 +255,7 @@ set(SOURCES_
${BS_DIR_GMMLIB}/Texture/GmmGen9Texture.cpp
${BS_DIR_GMMLIB}/Texture/GmmGen10Texture.cpp
${BS_DIR_GMMLIB}/Texture/GmmGen11Texture.cpp
${BS_DIR_GMMLIB}/Texture/GmmGen12Texture.cpp
${BS_DIR_GMMLIB}/Texture/GmmTexture.cpp
${BS_DIR_GMMLIB}/Texture/GmmTextureAlloc.cpp
${BS_DIR_GMMLIB}/Texture/GmmTextureSpecialCases.cpp
@@ -271,6 +278,7 @@ source_group("Source Files\\Cache Policy\\Client Files" FILES
${BS_DIR_GMMLIB}/CachePolicy/GmmCachePolicyResourceUsageDefinitions.h
${BS_DIR_GMMLIB}/CachePolicy/GmmGen10CachePolicy.h
${BS_DIR_GMMLIB}/CachePolicy/GmmGen11CachePolicy.h
${BS_DIR_GMMLIB}/CachePolicy/GmmGen12CachePolicy.h
${BS_DIR_GMMLIB}/CachePolicy/GmmGen8CachePolicy.h
${BS_DIR_GMMLIB}/CachePolicy/GmmGen9CachePolicy.h
)
@@ -316,6 +324,7 @@ source_group("Header Files" FILES
source_group("Header Files\\External\\Common\\Cache Policy" FILES
${BS_DIR_GMMLIB}/inc/External/Common/CachePolicy/GmmCachePolicyGen10.h
${BS_DIR_GMMLIB}/inc/External/Common/CachePolicy/GmmCachePolicyGen11.h
${BS_DIR_GMMLIB}/inc/External/Common/CachePolicy/GmmCachePolicyGen12.h
${BS_DIR_GMMLIB}/inc/External/Common/CachePolicy/GmmCachePolicyGen8.h
${BS_DIR_GMMLIB}/inc/External/Common/CachePolicy/GmmCachePolicyGen9.h
)
@@ -332,6 +341,7 @@ source_group("Header Files\\Internal\\Common" FILES
source_group("Header Files\\Internal\\Common\\Platform" FILES
${BS_DIR_GMMLIB}/inc/Internal/Common/Platform/GmmGen10Platform.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Platform/GmmGen11Platform.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Platform/GmmGen12Platform.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Platform/GmmGen8Platform.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Platform/GmmGen9Platform.h
)
@@ -339,6 +349,7 @@ source_group("Header Files\\Internal\\Common\\Platform" FILES
source_group("Header Files\\Internal\\Common\\Texture" FILES
${BS_DIR_GMMLIB}/inc/Internal/Common/Texture/GmmGen10TextureCalc.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Texture/GmmGen11TextureCalc.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Texture/GmmGen12TextureCalc.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Texture/GmmGen7TextureCalc.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Texture/GmmGen8TextureCalc.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Texture/GmmGen9TextureCalc.h

View File

@@ -49,6 +49,8 @@ DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_TIMER_PERF_QUEUE )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_UNMAP_PAGING_RESERVED_GTT_DMA_BUFFER )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_VSC_BATCH_BUFFER )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_WA_BATCH_BUFFER )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_KMD_OCA_BUFFER)
//
// 3D Usages
//
@@ -99,20 +101,24 @@ DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_TILED_SHADER_RESOURCE )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_TILED_UAV )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_VERTEX_BUFFER )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_VERTEX_BUFFER_L3_COHERENT_UC )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_VERTEX_BUFFER_L3_CACHED )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_VERTEX_BUFFER_L3_COHERENT_UC)
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_VERTEX_BUFFER_L3_CACHED)
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_OGL_WSTN_VERTEX_BUFFER )
DEFINE_RESOURCE_USAGE(GMM_RESOURCE_USAGE_POSH_VERTEX_BUFFER)
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_UAV )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_RENDER_TARGET_AND_SHADER_RESOURCE )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_RENDER_TARGET_AND_SHADER_RESOURCE_PARTIALENCSURFACES )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_WDDM_HISTORY_BUFFER )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_CONTEXT_SAVE_RESTORE )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_PTBR_PAGE_POOL )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_PTBR_BATCH_BUFFER )
//
// CM USAGES
//
DEFINE_RESOURCE_USAGE( CM_RESOURCE_USAGE_SurfaceState )
DEFINE_RESOURCE_USAGE( CM_RESOURCE_USAGE_StateHeap )
DEFINE_RESOURCE_USAGE( CM_RESOURCE_USAGE_StateHeap)
DEFINE_RESOURCE_USAGE( CM_RESOURCE_USAGE_NO_L3_SurfaceState )
DEFINE_RESOURCE_USAGE( CM_RESOURCE_USAGE_NO_LLC_ELLC_SurfaceState )
DEFINE_RESOURCE_USAGE( CM_RESOURCE_USAGE_NO_LLC_SurfaceState )
@@ -175,7 +181,7 @@ DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_SURFACE_CURR_ENCODE )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_SURFACE_REF_ENCODE )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_SURFACE_MV_DATA_ENCODE )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_SURFACE_HME_DOWNSAMPLED_ENCODE )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_SURFACE_HME_DOWNSAMPLED_ENCODE_FF )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_SURFACE_HME_DOWNSAMPLED_ENCODE_FF)
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_SURFACE_HME_DOWNSAMPLED_ENCODE_DST )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_SURFACE_ME_DISTORTION_ENCODE )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_SURFACE_BRC_ME_DISTORTION_ENCODE )
@@ -283,3 +289,7 @@ DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_OCL_IMAGE_NO_LLC_CACHING )
// Cross Adapter
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_XADAPTER_SHARED_RESOURCE )
// BCS usages
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_BLT_SOURCE )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_BLT_DESTINATION )

View File

@@ -254,10 +254,7 @@ GMM_STATUS GmmLib::GmmGen10CachePolicy::SetPATInitWA()
GMM_STATUS Status = GMM_SUCCESS;
#if(defined(__GMM_KMD__))
if(pGmmGlobalContext->GetGtSysInfoPtr()->EdramSizeInKb)
{
const_cast<WA_TABLE &>(pGmmGlobalContext->GetWaTable()).WaNoMocsEllcOnly = 1;
}
#else
Status = GMM_ERROR;
#endif
@@ -363,7 +360,7 @@ GMM_STATUS GmmLib::GmmGen10CachePolicy::SetupPAT()
{
GMM_PRIVATE_PAT PAT = {0};
if(pGmmGlobalContext->GetWaTable().WaNoMocsEllcOnly)
if(pGmmGlobalContext->GetWaTable().FtrMemTypeMocsDeferPAT)
{
GfxTargetCache = GMM_GFX_TC_ELLC_ONLY;
}

View File

@@ -0,0 +1,686 @@
/*==============================================================================
Copyright(c) 2019 Intel Corporation
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files(the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and / or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
============================================================================*/
#include "Internal/Common/GmmLibInc.h"
#include "External/Common/GmmCachePolicy.h"
#include "External/Common/CachePolicy/GmmCachePolicyGen10.h"
#include "External/Common/CachePolicy/GmmCachePolicyGen11.h"
#include "External/Common/CachePolicy/GmmCachePolicyGen12.h"
#if __GMM_KMD__
extern "C" NTSTATUS __GmmReadDwordKeyValue(void *pKmdGmmContext, char *pPath, WCHAR *pValueName, ULONG *pValueData);
extern "C" NTSTATUS __GmmWriteDwordKeyValue(void *pKmdGmmContext, char *pCStringPath, WCHAR *pValueName, ULONG DWord);
#endif
//=============================================================================
//
// Function: IsSpecialMOCSUsage
//
// Desc: This function returns special(hw-reserved) MocsIdx based on usage
//
// Parameters: usage -> Resource usage type
// UpdateMOCS -> True if MOCS Table must be updated, otherwise false
//
// Return: int32_t
//
//-----------------------------------------------------------------------------
int32_t GmmLib::GmmGen12CachePolicy::IsSpecialMOCSUsage(GMM_RESOURCE_USAGE_TYPE Usage, bool &UpdateMOCS)
{
int32_t MocsIdx = -1;
UpdateMOCS = true;
//Macros for L3-Eviction Type
#define NA 0x0
#define RO 0x1
#define RW 0x2
#define SP 0x3
switch(Usage)
{
case GMM_RESOURCE_USAGE_CCS:
__GMM_ASSERT(pCachePolicy[Usage].L3 == 0); //Architecturally, CCS isn't L3-cacheable.
pCachePolicy[Usage].L3 = 0;
MocsIdx = 60;
break;
case GMM_RESOURCE_USAGE_MOCS_62:
__GMM_ASSERT(pCachePolicy[Usage].L3 == 0); //Architecturally, TR/Aux-TT node isn't L3-cacheable.
pCachePolicy[Usage].L3 = 0;
MocsIdx = 62;
break;
case GMM_RESOURCE_USAGE_L3_EVICTION:
__GMM_ASSERT(pCachePolicy[Usage].L3 == 0 &&
pCachePolicy[Usage].L3Eviction == RW); //Reserved MOCS for L3-evictions
pCachePolicy[Usage].L3 = 0;
pCachePolicy[Usage].L3Eviction = RW;
MocsIdx = 63;
break;
case GMM_RESOURCE_USAGE_L3_EVICTION_SPECIAL:
case GMM_RESOURCE_USAGE_CCS_MEDIA_WRITABLE:
__GMM_ASSERT(pCachePolicy[Usage].L3 &&
pCachePolicy[Usage].L3Eviction == SP); //Reserved MOCS for L3-evictions
//Special-case for Displayable, and similar non-LLC accesses
GMM_ASSERTDPF(pCachePolicy[Usage].LLC == 0, "MOCS#61's Special Eviction isn't for LLC caching");
pCachePolicy[Usage].L3 = 1;
pCachePolicy[Usage].L3Eviction = SP;
MocsIdx = 61;
break;
default:
UpdateMOCS = false;
break;
}
if(pCachePolicy[Usage].L3Eviction == RW)
{
GMM_CACHE_POLICY_ELEMENT L3Eviction;
L3Eviction.Value = pCachePolicy[GMM_RESOURCE_USAGE_L3_EVICTION].Value;
//For internal purposes, HW overrides MOCS#63 as L3-uncacheable, but it is still used for L3-evictions
if(Usage != GMM_RESOURCE_USAGE_L3_EVICTION)
{
L3Eviction.L3 = 1; //Override L3, to verify whether MOCS#63 is applicable
}
__GMM_ASSERT(pCachePolicy[Usage].Value == L3Eviction.Value); //Allow mismatch due to override registries
//MocsIdx = 63; //Use a non-#63 MOCS; #63 itself is L3-uncached
}
else if(pCachePolicy[Usage].L3Eviction == SP)
{
__GMM_ASSERT(pCachePolicy[Usage].Value == pCachePolicy[GMM_RESOURCE_USAGE_L3_EVICTION_SPECIAL].Value); //Allow mismatch due to override registries
MocsIdx = 61;
}
return MocsIdx;
}
//=============================================================================
//
// Function: __GmmGen12InitCachePolicy
//
// Desc: This function initializes the cache policy
//
// Parameters: pCachePolicy -> Ptr to array to be populated with the
// mapping of usages -> cache settings.
//
// Return: GMM_STATUS
//
//-----------------------------------------------------------------------------
GMM_STATUS GmmLib::GmmGen12CachePolicy::InitCachePolicy()
{
__GMM_ASSERTPTR(pCachePolicy, GMM_ERROR);
#define DEFINE_CACHE_ELEMENT(usage, llc, ellc, l3, wt, age, aom, lecc_scc, l3_scc, scf, sso, cos, hdcl1, l3evict) DEFINE_CP_ELEMENT(usage, llc, ellc, l3, wt, age, aom, lecc_scc, l3_scc, scf, sso, cos, hdcl1, l3evict, 0, 0, 0)
#include "GmmGen12CachePolicy.h"
#define TC_LLC (1)
#define TC_ELLC (0)
#define TC_LLC_ELLC (2)
#define LeCC_UNCACHEABLE (0x0)
#define LeCC_WC_UNCACHEABLE (0x1)
#define LeCC_WT_CACHEABLE (0x2) //Only used as a MemPushWrite disqualifier if set along with eLLC-only - does this still hold on Gen12+?
#define LeCC_WB_CACHEABLE (0x3)
#define L3_UNCACHEABLE (0x1)
#define L3_WB_CACHEABLE (0x3)
#define DISABLE_SKIP_CACHING_CONTROL (0x0)
#define ENABLE_SKIP_CACHING_CONTROL (0x1)
#define DISABLE_SELF_SNOOP_OVERRIDE (0x0)
#define ENABLE_SELF_SNOOP_OVERRIDE (0x1)
#define ENABLE_SELF_SNOOP_ALWAYS (0x3)
#define CLASS_SERVICE_ZERO (0x0)
{
SetUpMOCSTable();
}
{
// Define index of cache element
uint32_t Usage = 0;
#if(_WIN32 && (_DEBUG || _RELEASE_INTERNAL))
void *pKmdGmmContext = NULL;
#if(defined(__GMM_KMD__))
pKmdGmmContext = pGmmGlobalContext->GetGmmKmdContext();
#endif
OverrideCachePolicy(pKmdGmmContext);
#endif
// Process the cache policy and fill in the look up table
for(; Usage < GMM_RESOURCE_USAGE_MAX; Usage++)
{
bool CachePolicyError = false;
bool SpecialMOCS = false;
int32_t CPTblIdx = -1;
uint32_t j = 0;
uint32_t PTEValue = 0;
GMM_CACHE_POLICY_TBL_ELEMENT UsageEle = {0};
CPTblIdx = IsSpecialMOCSUsage((GMM_RESOURCE_USAGE_TYPE)Usage, SpecialMOCS);
UsageEle.LeCC.Reserved = 0; // Reserved bits zeroed, this is so
// we can compare the unioned LeCC.DwordValue.
UsageEle.LeCC.SelfSnoop = DISABLE_SELF_SNOOP_OVERRIDE;
UsageEle.LeCC.CoS = CLASS_SERVICE_ZERO;
UsageEle.LeCC.SCC = 0;
UsageEle.LeCC.ESC = 0;
if(pCachePolicy[Usage].SCF && pGmmGlobalContext->GetSkuTable().FtrLLCBypass)
{
UsageEle.LeCC.SCF = pCachePolicy[Usage].SCF;
__GMM_ASSERT(pCachePolicy[Usage].LLC == 0); //LLC and ByPassLLC are mutually-exclusive
}
if(pCachePolicy[Usage].SSO & ENABLE_SELF_SNOOP_OVERRIDE)
{
UsageEle.LeCC.SelfSnoop = pCachePolicy[Usage].SSO & ENABLE_SELF_SNOOP_ALWAYS;
}
if(pCachePolicy[Usage].CoS)
{
UsageEle.LeCC.CoS = pCachePolicy[Usage].CoS;
}
if(pCachePolicy[Usage].HDCL1)
{
UsageEle.HDCL1 = 1;
}
if(pCachePolicy[Usage].LeCC_SCC)
{
UsageEle.LeCC.SCC = pCachePolicy[Usage].LeCC_SCC;
UsageEle.LeCC.ESC = ENABLE_SKIP_CACHING_CONTROL;
}
UsageEle.LeCC.LRUM = pCachePolicy[Usage].AGE;
// default to LLC target cache.
UsageEle.LeCC.TargetCache = TC_LLC;
UsageEle.LeCC.Cacheability = LeCC_WB_CACHEABLE;
if(pCachePolicy[Usage].LLC)
{
UsageEle.LeCC.TargetCache = TC_LLC;
__GMM_ASSERT(pCachePolicy[Usage].SCF == 0); //LLC and ByPassLLC are mutually-exclusive
}
else
{
UsageEle.LeCC.Cacheability = LeCC_WC_UNCACHEABLE;
}
UsageEle.L3.Reserved = 0; // Reserved bits zeroed, this is so
// we can compare the unioned L3.UshortValue.
UsageEle.L3.ESC = DISABLE_SKIP_CACHING_CONTROL;
UsageEle.L3.SCC = 0;
UsageEle.L3.Cacheability = pCachePolicy[Usage].L3 ? L3_WB_CACHEABLE : L3_UNCACHEABLE;
__GMM_ASSERT((pCachePolicy[Usage].L3 && pCachePolicy[Usage].L3Eviction != 0) ||
(pCachePolicy[Usage].L3 == 0 && (pCachePolicy[Usage].L3Eviction == 0 || Usage == GMM_RESOURCE_USAGE_L3_EVICTION)));
if(pCachePolicy[Usage].L3_SCC)
{
UsageEle.L3.ESC = ENABLE_SKIP_CACHING_CONTROL;
UsageEle.L3.SCC = (uint16_t)pCachePolicy[Usage].L3_SCC;
}
//Special-case MOCS handling for MOCS Table Index 60-63
if(CPTblIdx >= GMM_GEN12_MAX_NUMBER_MOCS_INDEXES)
{
GMM_CACHE_POLICY_TBL_ELEMENT *TblEle = &pGmmGlobalContext->GetCachePolicyTlbElement()[CPTblIdx];
if(SpecialMOCS &&
!(TblEle->LeCC.DwordValue == UsageEle.LeCC.DwordValue &&
TblEle->L3.UshortValue == UsageEle.L3.UshortValue &&
TblEle->HDCL1 == UsageEle.HDCL1))
{
//Assert if being overwritten!
__GMM_ASSERT(TblEle->LeCC.DwordValue == 0 &&
TblEle->L3.UshortValue == 0 &&
TblEle->HDCL1 == 0);
#if(_WIN32 && (_DEBUG || _RELEASE_INTERNAL))
if(pCachePolicy[Usage].IsOverridenByRegkey)
{
TblEle->LeCC.DwordValue = UsageEle.LeCC.DwordValue;
TblEle->L3.UshortValue = UsageEle.L3.UshortValue;
TblEle->HDCL1 = UsageEle.HDCL1;
}
#endif
}
}
//For HDC L1 caching, MOCS Table index 48-59 should be used
else if(UsageEle.HDCL1)
{
for(j = GMM_GEN10_HDCL1_MOCS_INDEX_START; j <= CurrentMaxL1HdcMocsIndex; j++)
{
GMM_CACHE_POLICY_TBL_ELEMENT *TblEle = &pGmmGlobalContext->GetCachePolicyTlbElement()[j];
if(TblEle->LeCC.DwordValue == UsageEle.LeCC.DwordValue &&
TblEle->L3.UshortValue == UsageEle.L3.UshortValue &&
TblEle->HDCL1 == UsageEle.HDCL1)
{
CPTblIdx = j;
break;
}
}
}
else
{
// Due to unstable system behavior on TGLLP, MOCS index #0 had to be programmed as UC in the MOCS lookup table (pCachePolicyTlbElement).
// However, Index 0 is still Reserved for Error by HW and should not be used.
// Hence GmmLib opts out of MOCS#0 usage: the lookup into the MOCS table and MOCS index assignment must start from Index 1.
for(j = 1; j <= CurrentMaxMocsIndex; j++)
{
GMM_CACHE_POLICY_TBL_ELEMENT *TblEle = &pGmmGlobalContext->GetCachePolicyTlbElement()[j];
if(TblEle->LeCC.DwordValue == UsageEle.LeCC.DwordValue &&
TblEle->L3.UshortValue == UsageEle.L3.UshortValue &&
TblEle->HDCL1 == UsageEle.HDCL1)
{
CPTblIdx = j;
break;
}
}
}
// Didn't find the caching settings in one of the already programmed lookup table entries.
// Need to add a new lookup table entry.
if(CPTblIdx == -1)
{
#if(_WIN32 && (_DEBUG || _RELEASE_INTERNAL))
// If the Cache Policy setting is overridden through a regkey,
// don't raise an assert / log an error. Raising an assert for debug/perf testing isn't really helpful.
if(pCachePolicy[Usage].IsOverridenByRegkey)
{
if(UsageEle.HDCL1 && CurrentMaxL1HdcMocsIndex < GMM_GEN12_MAX_NUMBER_MOCS_INDEXES - 1)
{
GMM_CACHE_POLICY_TBL_ELEMENT *TblEle = &(pGmmGlobalContext->GetCachePolicyTlbElement()[++CurrentMaxL1HdcMocsIndex]);
CPTblIdx = CurrentMaxL1HdcMocsIndex;
TblEle->LeCC.DwordValue = UsageEle.LeCC.DwordValue;
TblEle->L3.UshortValue = UsageEle.L3.UshortValue;
TblEle->HDCL1 = UsageEle.HDCL1;
}
else if(CurrentMaxMocsIndex < GMM_GEN10_HDCL1_MOCS_INDEX_START)
{
GMM_CACHE_POLICY_TBL_ELEMENT *TblEle = &(pGmmGlobalContext->GetCachePolicyTlbElement()[++CurrentMaxMocsIndex]);
CPTblIdx = CurrentMaxMocsIndex;
TblEle->LeCC.DwordValue = UsageEle.LeCC.DwordValue;
TblEle->L3.UshortValue = UsageEle.L3.UshortValue;
TblEle->HDCL1 = UsageEle.HDCL1;
}
else
{
// Too many unique caching combinations to program the
// MOCS lookup table.
CachePolicyError = true;
GMM_ASSERTDPF(
"Cache Policy Init Error: Invalid Cache Programming, too many unique caching combinations"
"(we only support GMM_GEN_MAX_NUMBER_MOCS_INDEXES = %d)",
GMM_MAX_NUMBER_MOCS_INDEXES - 1);
// Set cache policy index to uncached.
CPTblIdx = 3;
}
}
else
#endif
{
GMM_ASSERTDPF(false, "CRITICAL ERROR: Cache Policy Usage value specified by Client is not defined in Fixed MOCS Table!");
// Log Error using regkey to indicate the above error
#if(_WIN32 && (_DEBUG || _RELEASE_INTERNAL) && __GMM_KMD__)
REGISTRY_OVERRIDE_WRITE(pKmdGmmContext, Usage, NewMOCSEntryLeCCValue, UsageEle.LeCC.DwordValue);
REGISTRY_OVERRIDE_WRITE(pKmdGmmContext, Usage, NewMOCSEntryL3Value, UsageEle.L3.UshortValue);
REGISTRY_OVERRIDE_WRITE(pKmdGmmContext, Usage, NewMOCSEntryHDCL1, UsageEle.HDCL1);
#endif
CachePolicyError = true;
GMM_ASSERTDPF(
"Cache Policy Init Error: Invalid Cache Programming, too many unique caching combinations"
"(we only support GMM_GEN_MAX_NUMBER_MOCS_INDEXES = %d)",
CurrentMaxMocsIndex);
// Set cache policy index to uncached.
CPTblIdx = 3;
}
}
// PTE entries do not control caching on SKL+ (for legacy context)
if(!GetUsagePTEValue(pCachePolicy[Usage], Usage, &PTEValue))
{
CachePolicyError = true;
}
pCachePolicy[Usage].PTE.DwordValue = PTEValue;
pCachePolicy[Usage].MemoryObjectOverride.Gen12.Index = CPTblIdx;
pCachePolicy[Usage].Override = ALWAYS_OVERRIDE;
if(CachePolicyError)
{
GMM_ASSERTDPF("Cache Policy Init Error: Invalid Cache Programming - Element %d", Usage);
}
}
}
return GMM_SUCCESS;
}
/////////////////////////////////////////////////////////////////////////////////////
/// Return true if (MT2) is a better match for (WantedMT)
/// than (MT1)
///
/// @param[in] WantedMT: Wanted Memory Type
/// @param[in] MT1: Memory Type for PATIdx1
/// @param[in] MT2: Memory Type for PATIdx2
///
/// @return Select the new PAT Index True/False
/////////////////////////////////////////////////////////////////////////////////////
uint8_t GmmLib::GmmGen12CachePolicy::SelectNewPATIdx(GMM_GFX_MEMORY_TYPE WantedMT,
GMM_GFX_MEMORY_TYPE MT1, GMM_GFX_MEMORY_TYPE MT2)
{
uint8_t SelectPAT2 = 0;
// select on Memory Type
if(MT1 != WantedMT)
{
if(MT2 == WantedMT || MT2 == GMM_GFX_UC_WITH_FENCE)
{
SelectPAT2 = 1;
}
goto EXIT;
}
EXIT:
return SelectPAT2;
}
/////////////////////////////////////////////////////////////////////////////////////
/// Returns the PAT idx that best matches the cache policy for this usage.
///
/// @param: CachePolicy: cache policy for a usage
///
/// @return PAT Idx to use in the PTE
/////////////////////////////////////////////////////////////////////////////////////
uint32_t GmmLib::GmmGen12CachePolicy::BestMatchingPATIdx(GMM_CACHE_POLICY_ELEMENT CachePolicy)
{
uint32_t i;
uint32_t PATIdx = 0;
GMM_GFX_MEMORY_TYPE WantedMemoryType = GMM_GFX_UC_WITH_FENCE, MemoryType;
WA_TABLE * pWaTable = &const_cast<WA_TABLE &>(pGmmGlobalContext->GetWaTable());
WantedMemoryType = GetWantedMemoryType(CachePolicy);
// Override WantedMemoryType so that PAT.MT is UC.
// Gen12 uses a max function to resolve PAT-vs-MOCS MemType, so unless PTE.PAT says UC, MOCS won't be able to set UC!
if(pWaTable->WaMemTypeIsMaxOfPatAndMocs)
{
WantedMemoryType = GMM_GFX_UC_WITH_FENCE;
}
for(i = 1; i < GMM_NUM_PAT_ENTRIES; i++)
{
GMM_PRIVATE_PAT PAT1 = GetPrivatePATEntry(PATIdx);
GMM_PRIVATE_PAT PAT2 = GetPrivatePATEntry(i);
if(SelectNewPATIdx(WantedMemoryType,
(GMM_GFX_MEMORY_TYPE)PAT1.Gen12.MemoryType,
(GMM_GFX_MEMORY_TYPE)PAT2.Gen12.MemoryType))
{
PATIdx = i;
}
}
MemoryType = (GMM_GFX_MEMORY_TYPE)GetPrivatePATEntry(PATIdx).Gen12.MemoryType;
if(MemoryType != WantedMemoryType)
{
// Failed to find a matching PAT entry
return GMM_PAT_ERROR;
}
return PATIdx;
}
/////////////////////////////////////////////////////////////////////////////////////
/// Initializes WA's needed for setting up the Private PATs
/// WaNoMocsEllcOnly (reset)
/// WaGttPat0, WaGttPat0GttWbOverOsIommuEllcOnly, WaGttPat0WB (use from base class)
///
/// @return GMM_STATUS
///
/////////////////////////////////////////////////////////////////////////////////////
GMM_STATUS GmmLib::GmmGen12CachePolicy::SetPATInitWA()
{
GMM_STATUS Status = GMM_SUCCESS;
WA_TABLE * pWaTable = &const_cast<WA_TABLE &>(pGmmGlobalContext->GetWaTable());
#if(defined(__GMM_KMD__))
__GMM_ASSERT(pGmmGlobalContext->GetSkuTable().FtrMemTypeMocsDeferPAT == 0x0); //MOCS.TargetCache supports eLLC only, PAT.TC -> reserved bits.
pWaTable->WaGttPat0WB = 0; //Override PAT #0
#else
Status = GMM_ERROR;
#endif
return Status;
}
/////////////////////////////////////////////////////////////////////////////////////
/// Initializes the Gfx PAT tables for AdvCtx and Gfx MMIO/Private PAT
/// PAT0 = WB_COHERENT or UC depending on WaGttPat0WB
/// PAT1 = UC or WB_COHERENT depending on WaGttPat0WB
/// PAT2 = WB_MOCSLESS
/// PAT3 = WB
/// PAT4 = WT
/// PAT5 = WC
/// PAT6 = WC
/// PAT7 = WC
/// HLD says to set PAT0/1 to WC, but since we don't have WC in the GPU,
/// the WC option is the same as UC. Hence PAT0 or PAT1 is set to UC.
/// Unused PATs (5,6,7) are set to WC.
///
/// @return GMM_STATUS
/////////////////////////////////////////////////////////////////////////////////////
GMM_STATUS GmmLib::GmmGen12CachePolicy::SetupPAT()
{
GMM_STATUS Status = GMM_SUCCESS;
#if(defined(__GMM_KMD__))
uint32_t i = 0;
GMM_GFX_MEMORY_TYPE GfxMemType = GMM_GFX_UC_WITH_FENCE;
int32_t * pPrivatePATTableMemoryType = NULL;
pPrivatePATTableMemoryType = pGmmGlobalContext->GetPrivatePATTableMemoryType();
__GMM_ASSERT(pGmmGlobalContext->GetSkuTable().FtrIA32eGfxPTEs);
for(i = 0; i < GMM_NUM_GFX_PAT_TYPES; i++)
{
pPrivatePATTableMemoryType[i] = -1;
}
// Set values for GmmGlobalInfo PrivatePATTable
for(i = 0; i < GMM_NUM_PAT_ENTRIES; i++)
{
GMM_PRIVATE_PAT PAT = {0};
switch(i)
{
case PAT0:
if(pGmmGlobalContext->GetWaTable().WaGttPat0)
{
if(pGmmGlobalContext->GetWaTable().WaGttPat0WB)
{
GfxMemType = GMM_GFX_WB;
pPrivatePATTableMemoryType[GMM_GFX_PAT_WB_COHERENT] = PAT0;
}
else
{
GfxMemType = GMM_GFX_UC_WITH_FENCE;
pPrivatePATTableMemoryType[GMM_GFX_PAT_UC] = PAT0;
}
}
else // if GTT is not tied to PAT0 then WaGttPat0WB is NA
{
GfxMemType = GMM_GFX_WB;
pPrivatePATTableMemoryType[GMM_GFX_PAT_WB_COHERENT] = PAT0;
}
break;
case PAT1:
if(pGmmGlobalContext->GetWaTable().WaGttPat0 && !pGmmGlobalContext->GetWaTable().WaGttPat0WB)
{
GfxMemType = GMM_GFX_WB;
pPrivatePATTableMemoryType[GMM_GFX_PAT_WB_COHERENT] = PAT1;
}
else
{
GfxMemType = GMM_GFX_UC_WITH_FENCE;
pPrivatePATTableMemoryType[GMM_GFX_PAT_UC] = PAT1;
}
break;
case PAT2:
// This PAT idx shall be used for MOCS-less resources like Page Tables.
// Page Tables have TC hardcoded to eLLC+LLC in Adv Ctxt, hence the same is used in Leg Ctxt.
// For BDW-H, due to a Perf issue, TC has to be eLLC-only for Page Tables when eDRAM is present.
GfxMemType = GMM_GFX_WB;
pPrivatePATTableMemoryType[GMM_GFX_PAT_WB_MOCSLESS] = PAT2;
break;
case PAT3:
GfxMemType = GMM_GFX_WB;
pPrivatePATTableMemoryType[GMM_GFX_PAT_WB] = PAT3;
break;
case PAT4:
GfxMemType = GMM_GFX_WT;
pPrivatePATTableMemoryType[GMM_GFX_PAT_WT] = PAT4;
break;
case PAT5:
case PAT6:
case PAT7:
GfxMemType = GMM_GFX_WC;
pPrivatePATTableMemoryType[GMM_GFX_PAT_WC] = PAT5;
break;
default:
__GMM_ASSERT(0);
Status = GMM_ERROR;
}
PAT.Gen12.MemoryType = GfxMemType;
SetPrivatePATEntry(i, PAT);
}
#else
Status = GMM_ERROR;
#endif
return Status;
}
//=============================================================================
//
// Function: SetUpMOCSTable
//
// Desc: Programs the fixed MOCS lookup table entries
//
// Parameters: none
//
// Return: void
//
//-----------------------------------------------------------------------------
void GmmLib::GmmGen12CachePolicy::SetUpMOCSTable()
{
GMM_CACHE_POLICY_TBL_ELEMENT *pCachePolicyTlbElement = &(pGmmGlobalContext->GetCachePolicyTlbElement()[0]);
#define GMM_DEFINE_MOCS(Index, L3_ESC, L3_SCC, L3_CC, LeCC_CC, LeCC_TC, LeCC_LRUM, LeCC_AOM, LeCC_ESC, LeCC_SCC, LeCC_PFM, LeCC_SCF, LeCC_CoS, LeCC_SelfSnoop, _HDCL1) \
{ \
pCachePolicyTlbElement[Index].L3.ESC = L3_ESC; \
pCachePolicyTlbElement[Index].L3.SCC = L3_SCC; \
pCachePolicyTlbElement[Index].L3.Cacheability = L3_CC; \
pCachePolicyTlbElement[Index].LeCC.Cacheability = LeCC_CC; \
pCachePolicyTlbElement[Index].LeCC.TargetCache = LeCC_TC; \
pCachePolicyTlbElement[Index].LeCC.LRUM = LeCC_LRUM; \
pCachePolicyTlbElement[Index].LeCC.AOM = LeCC_AOM; \
pCachePolicyTlbElement[Index].LeCC.ESC = LeCC_ESC; \
pCachePolicyTlbElement[Index].LeCC.SCC = LeCC_SCC; \
pCachePolicyTlbElement[Index].LeCC.PFM = LeCC_PFM; \
pCachePolicyTlbElement[Index].LeCC.SCF = LeCC_SCF; \
pCachePolicyTlbElement[Index].LeCC.CoS = LeCC_CoS; \
pCachePolicyTlbElement[Index].LeCC.SelfSnoop = LeCC_SelfSnoop; \
pCachePolicyTlbElement[Index].HDCL1 = _HDCL1; \
}
// clang-format off
// Fixed MOCS Table
// Index ESC SCC L3CC LeCC TC LRUM DAoM ERSC SCC PFM SCF CoS SSE HDCL1
GMM_DEFINE_MOCS( 0 , 0 , 0 , 1 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
GMM_DEFINE_MOCS( 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
GMM_DEFINE_MOCS( 2 , 0 , 0 , 3 , 3 , 1 , 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
GMM_DEFINE_MOCS( 3 , 0 , 0 , 1 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
GMM_DEFINE_MOCS( 4 , 0 , 0 , 3 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
GMM_DEFINE_MOCS( 5 , 0 , 0 , 1 , 3 , 1 , 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
GMM_DEFINE_MOCS( 6 , 0 , 0 , 1 , 3 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
GMM_DEFINE_MOCS( 7 , 0 , 0 , 3 , 3 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
GMM_DEFINE_MOCS( 8 , 0 , 0 , 1 , 3 , 1 , 2 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
GMM_DEFINE_MOCS( 9 , 0 , 0 , 3 , 3 , 1 , 2 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
GMM_DEFINE_MOCS( 10 , 0 , 0 , 1 , 3 , 1 , 3 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
GMM_DEFINE_MOCS( 11 , 0 , 0 , 3 , 3 , 1 , 3 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
GMM_DEFINE_MOCS( 12 , 0 , 0 , 1 , 3 , 1 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
GMM_DEFINE_MOCS( 13 , 0 , 0 , 3 , 3 , 1 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
GMM_DEFINE_MOCS( 14 , 0 , 0 , 1 , 3 , 1 , 2 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
GMM_DEFINE_MOCS( 15 , 0 , 0 , 3 , 3 , 1 , 2 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
GMM_DEFINE_MOCS( 16 , 0 , 0 , 1 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 1 , 0 , 0 , 0 )
GMM_DEFINE_MOCS( 17 , 0 , 0 , 3 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 1 , 0 , 0 , 0 )
GMM_DEFINE_MOCS( 18 , 0 , 0 , 3 , 3 , 1 , 3 , 0 , 0 , 0 , 0 , 0 , 0 , 3 , 0 )
GMM_DEFINE_MOCS( 19 , 0 , 0 , 3 , 3 , 1 , 3 , 0 , 0 , 7 , 0 , 0 , 0 , 0 , 0 )
GMM_DEFINE_MOCS( 20 , 0 , 0 , 3 , 3 , 1 , 3 , 0 , 0 , 3 , 0 , 0 , 0 , 0 , 0 )
GMM_DEFINE_MOCS( 21 , 0 , 0 , 3 , 3 , 1 , 3 , 0 , 0 , 1 , 0 , 0 , 0 , 0 , 0 )
GMM_DEFINE_MOCS( 22 , 0 , 0 , 3 , 3 , 1 , 3 , 0 , 1 , 3 , 0 , 0 , 0 , 0 , 0 )
GMM_DEFINE_MOCS( 23 , 0 , 0 , 3 , 3 , 1 , 3 , 0 , 1 , 7 , 0 , 0 , 0 , 0 , 0 )
GMM_DEFINE_MOCS( 48 , 0 , 0 , 3 , 3 , 1 , 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 )
GMM_DEFINE_MOCS( 49 , 0 , 0 , 3 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 )
GMM_DEFINE_MOCS( 50 , 0 , 0 , 1 , 3 , 1 , 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 )
GMM_DEFINE_MOCS( 51 , 0 , 0 , 1 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 )
GMM_DEFINE_MOCS( 60 , 0 , 0 , 1 , 3 , 1 , 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
GMM_DEFINE_MOCS( 61 , 0 , 0 , 3 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 1 , 0 , 0 , 0 )
GMM_DEFINE_MOCS( 62 , 0 , 0 , 1 , 3 , 1 , 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
GMM_DEFINE_MOCS( 63 , 0 , 0 , 1 , 3 , 1 , 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
if(!pGmmGlobalContext->GetSkuTable().FtrLLCBypass)
{
GMM_DEFINE_MOCS( 16 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
GMM_DEFINE_MOCS( 17 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
GMM_DEFINE_MOCS( 61 , 0 , 0 , 3 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
}
// clang-format on
CurrentMaxMocsIndex = 23;
CurrentMaxL1HdcMocsIndex = 51;
CurrentMaxSpecialMocsIndex = 63;
#undef GMM_DEFINE_MOCS
}
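For reference, the fixed-table lookup in InitCachePolicy above is a linear search for an exact LeCC/L3/HDCL1 match that deliberately starts at index 1, since index 0 is reserved for Error on TGLLP. Below is a minimal standalone sketch of that matching step, assuming a simplified element type; MocsEntry and FindMocsIndex are hypothetical stand-ins for GMM_CACHE_POLICY_TBL_ELEMENT and the in-line loop, not part of this commit:

#include <cstdint>

// Hypothetical, simplified stand-in for GMM_CACHE_POLICY_TBL_ELEMENT.
struct MocsEntry
{
    uint32_t LeCCDword; // packed LeCC fields, compared as one dword
    uint16_t L3Ushort;  // packed L3 fields, compared as one ushort
    uint8_t  HDCL1;
};

// Returns the first matching MOCS index in [1, MaxIndex], or -1 if the
// requested combination is not present in the fixed table.
int32_t FindMocsIndex(const MocsEntry *Table, uint32_t MaxIndex, const MocsEntry &Wanted)
{
    for(uint32_t i = 1; i <= MaxIndex; i++) // index 0 is reserved for Error and never matched
    {
        if(Table[i].LeCCDword == Wanted.LeCCDword &&
           Table[i].L3Ushort == Wanted.L3Ushort &&
           Table[i].HDCL1 == Wanted.HDCL1)
        {
            return (int32_t)i;
        }
    }
    return -1; // caller falls back to the error / regkey-override path
}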

View File

@@ -0,0 +1,300 @@
/*==============================================================================
Copyright(c) 2019 Intel Corporation
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files(the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and / or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
============================================================================*/
#include "GmmCachePolicyConditionals.h"
#define EDRAM (SKU(FtrEDram))
#define FBLLC (SKU(FtrFrameBufferLLC))
#define NS (SKU(FtrLLCBypass))
//Macros for L3-Eviction Type
#define NA 0x0
#define RO 0x1
#define RW 0x2
#define SP 0x3
// Cache Policy Definition
// AOM = Do not allocate on miss (0 = allocate on miss [normal cache behavior], 1 = don't allocate on miss)
// LeCC_SCC = LLC/eLLC skip caching control (disabled if LeCC_SCC = 0)
// L3_SCC = L3 skip caching control (disabled if L3_SCC = 0)
// SCF = Snoop Control Field (SCF)- Only for SKL/BXT and Gen12+ (as coherent/non-coherent)
// SSO = Override MIDI self snoop settings (1 = never send to uncore, 3 = always send to uncore, 0 = [default] No override )
// CoS = Class of Service ( allowed values 1, 2, 3 for class IDs 1, 2, 3 respectively, default class 0)
// HDCL1 = HDC L1 cache control (1 = cached in HDC L1, 0 = not cached in HDC L1)
// Faster PushWrite (Gen10+) is used iff !WT and eLLC-only cacheable - globally visible surfaces (e.g. display surfaces) should be marked WT
// L3Evict = Type of L3-eviction (0 = NA i.e. not L3 cacheable, 1 = RO i.e. ReadOnly, 2 = RW i.e. Standard using MOCS#63, 3 = SP i.e. Special using MOCS#61 for non-LLC access)
//***************************************************************************************************************/
// USAGE TYPE , LLC , ELLC , L3 , WT , AGE , AOM , LeCC_SCC , L3_SCC, SCF, SSO, CoS, HDCL1, L3Evict)
/****************************************************************************************************************/
// KMD Usages
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_BATCH_BUFFER , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_COMP_FRAME_BUFFER , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_CONTEXT_SWITCH_BUFFER , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_CURSOR , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_DISPLAY_STATIC_IMG_FOR_SMOOTH_ROTATION_BUFFER , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_DUMMY_PAGE , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_GDI_SURFACE , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_GENERIC_KMD_RESOURCE , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
// GMM_RESOURCE_USAGE_GFX_RING is only used if WaEnableRingHostMapping is enabled.
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_GFX_RING , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_GTT_TRANSFER_REGION , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HW_CONTEXT , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_STATE_MANAGER_KERNEL_STATE , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_KMD_STAGING_SURFACE , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_MBM_BUFFER , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_NNDI_BUFFER , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OVERLAY_MBM , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_PRIMARY_SURFACE , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, NS, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SCREEN_PROTECTION_INTERMEDIATE_SURFACE , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SHADOW_SURFACE , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SM_SCRATCH_STATE , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_STATUS_PAGE , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_TIMER_PERF_QUEUE , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_UNKNOWN , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_UNMAP_PAGING_RESERVED_GTT_DMA_BUFFER , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_VSC_BATCH_BUFFER , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_WA_BATCH_BUFFER , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_KMD_OCA_BUFFER , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
//
// 3D Usages
//
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_UMD_BATCH_BUFFER , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_BINDING_TABLE_POOL , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_CCS , 1 , 0 , 0 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_CONSTANT_BUFFER_POOL , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_DEPTH_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_DISPLAYABLE_RENDER_TARGET , 0 , EDRAM, 1 , EDRAM , 0 , 0, 0, 0, NS, 0, 0, 0, SP );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_GATHER_POOL , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HEAP_SURFACE_STATE , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HEAP_DYNAMIC_STATE , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HEAP_GENERAL_STATE , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HEAP_GENERAL_STATE_UC , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HEAP_STATELESS_DATA_PORT , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HEAP_INDIRECT_OBJECT , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HEAP_INSTRUCTION , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HIZ , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_INDEX_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_INDEX_BUFFER_L3_COHERENT_UC , 1 , 1 , 0 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_INDEX_BUFFER_L3_CACHED , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_MCS , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_PUSH_CONSTANT_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_PULL_CONSTANT_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 1, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_QUERY , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_RENDER_TARGET , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SHADER_RESOURCE , 0 , 1 , 1 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_STAGING , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_STENCIL_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_STREAM_OUTPUT_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_TILE_POOL , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SHADER_RESOURCE_LLC_BYPASS , 0 , 1 , 1 , 0 , 0 , 0, 0, 0, NS, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_MOCS_62 , 1 , 0 , 0 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_L3_EVICTION , 1 , 1 , 0 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RW );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_L3_EVICTION_SPECIAL , 0 , EDRAM, 1 , EDRAM , 0 , 0, 0, 0, NS, 0, 0, 0, SP );
// Tiled Resource
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_TILED_DEPTH_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_TILED_HIZ , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_TILED_MCS , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_TILED_CCS , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_TILED_RENDER_TARGET , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_TILED_RENDER_TARGET_AND_SHADER_RESOURCE , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_TILED_SHADER_RESOURCE , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_TILED_UAV , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_UAV , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_VERTEX_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_VERTEX_BUFFER_L3_COHERENT_UC , 1 , 1 , 0 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_VERTEX_BUFFER_L3_CACHED , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OGL_WSTN_VERTEX_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_POSH_VERTEX_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_RENDER_TARGET_AND_SHADER_RESOURCE , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_WDDM_HISTORY_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_CONTEXT_SAVE_RESTORE , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_PTBR_PAGE_POOL , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_PTBR_BATCH_BUFFER , 0 , 0 , 1 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, RO );
//
// CM USAGES
//
// USAGE TYPE , LLC , ELLC , L3 , WT , AGE , AOM , LeCC_SCC , L3_SCC, SCF, SSO, CoS, HDCL1, L3Evict )
DEFINE_CACHE_ELEMENT(CM_RESOURCE_USAGE_SurfaceState, 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(CM_RESOURCE_USAGE_StateHeap, 1 , 0 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(CM_RESOURCE_USAGE_L1_Enabled_SurfaceState, 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 1, RO );
DEFINE_CACHE_ELEMENT(CM_RESOURCE_USAGE_NO_L3_SurfaceState, 1 , 1 , 0 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(CM_RESOURCE_USAGE_NO_LLC_ELLC_SurfaceState, 0 , 0 , 1 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(CM_RESOURCE_USAGE_NO_LLC_SurfaceState, 0 , 1 , 1 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(CM_RESOURCE_USAGE_NO_ELLC_SurfaceState, 1 , 0 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(CM_RESOURCE_USAGE_NO_LLC_L3_SurfaceState, 0 , 1 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(CM_RESOURCE_USAGE_NO_ELLC_L3_SurfaceState, 1 , 0 , 0 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(CM_RESOURCE_USAGE_NO_CACHE_SurfaceState, 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
//
// MP USAGES
//
DEFINE_CACHE_ELEMENT(MP_RESOURCE_USAGE_BEGIN, 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(MP_RESOURCE_USAGE_DEFAULT, 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(MP_RESOURCE_USAGE_SurfaceState, 1 , 1 , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(MP_RESOURCE_USAGE_END, 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
// MHW - SFC
DEFINE_CACHE_ELEMENT(MHW_RESOURCE_USAGE_Sfc_CurrentOutputSurface, 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(MHW_RESOURCE_USAGE_Sfc_AvsLineBufferSurface, 1 , 1 , 1 , 0 , 1, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(MHW_RESOURCE_USAGE_Sfc_IefLineBufferSurface, 1 , 1 , 1 , 0 , 1, 0, 0, 0, 0, 0, 0, 0, RO );
//Media GMM Resource USAGES
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_PRE_DEBLOCKING_CODEC , 0 , EDRAM , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_POST_DEBLOCKING_CODEC , 0 , EDRAM , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_ORIGINAL_UNCOMPRESSED_PICTURE_ENCODE , 0 , EDRAM , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_ORIGINAL_UNCOMPRESSED_PICTURE_DECODE , 0 , EDRAM , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_STREAMOUT_DATA_CODEC , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_INTRA_ROWSTORE_SCRATCH_BUFFER_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_DEBLOCKINGFILTER_ROWSTORE_SCRATCH_BUFFER_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_REFERENCE_PICTURE_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_MACROBLOCK_STATUS_BUFFER_CODEC , 0 , EDRAM , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_MFX_INDIRECT_BITSTREAM_OBJECT_DECODE , 0 , EDRAM , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_MFX_INDIRECT_MV_OBJECT_CODEC , 0 , EDRAM , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_MFD_INDIRECT_IT_COEF_OBJECT_DECODE , 0 , EDRAM , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_MFC_INDIRECT_PAKBASE_OBJECT_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_BSDMPC_ROWSTORE_SCRATCH_BUFFER_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_MPR_ROWSTORE_SCRATCH_BUFFER_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_BITPLANE_READ_CODEC , 0 , EDRAM , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_AACSBIT_VECTOR_CODEC , 0 , EDRAM , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_DIRECTMV_BUFFER_CODEC , 0 , EDRAM , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_CURR_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_REF_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_MV_DATA_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_HME_DOWNSAMPLED_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_HME_DOWNSAMPLED_ENCODE_DST , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_ME_DISTORTION_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_BRC_ME_DISTORTION_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_PAK_OBJECT_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_FLATNESS_CHECK_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_MBENC_CURBE_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_VDENC_ROW_STORE_BUFFER_CODEC , 1 , 0 , 0 , 0 , 3, 0 , 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_VDENC_STREAMIN_CODEC , 0 , EDRAM , 0 , 0 , 0, 0 , 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HCP_MD_CODEC , 0 , EDRAM , 0 , 0 , 0, 0 , 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HCP_SAO_CODEC , 0 , EDRAM , 0 , 0 , 0, 0 , 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HCP_MV_CODEC , 0 , EDRAM , 0 , 0 , 0, 0 , 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HCP_STATUS_ERROR_CODEC , 0 , 0 , 0 , 0 , 0, 0 , 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HCP_LCU_ILDB_STREAMOUT_CODEC , 0 , 0 , 0 , 0 , 0, 0 , 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_VP9_PROBABILITY_BUFFER_CODEC , 1 , EDRAM , 0 , 0 , 3, 0 , 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_VP9_SEGMENT_ID_BUFFER_CODEC , 1 , EDRAM , 0 , 0 , 3, 0 , 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_VP9_HVD_ROWSTORE_BUFFER_CODEC , 1 , EDRAM , 0 , 0 , 3, 0 , 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_MACROBLOCK_ILDB_STREAM_OUT_BUFFER_CODEC , 0 , 0 , 0 , 0 , 0, 0 , 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SSE_SRC_PIXEL_ROW_STORE_BUFFER_CODEC , 1 , EDRAM , 0 , 0 , 3, 0 , 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SLICE_STATE_STREAM_OUT_BUFFER_CODEC , 0 , 0 , 0 , 0 , 0, 0 , 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_CABAC_SYNTAX_STREAM_OUT_BUFFER_CODEC , 0 , EDRAM , 0 , 0 , 0, 0 , 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_PRED_COL_STORE_BUFFER_CODEC , 1 , EDRAM , 0 , 0 , 3, 0 , 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_UNCACHED , 0 , 0 , 0 , 0 , 0, 0 , 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_ELLC_ONLY , 0 , EDRAM , 0 , 0 , 0, 0 , 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_ELLC_LLC_ONLY , 1 , EDRAM , 0 , 0 , 3, 0 , 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_ELLC_LLC_L3 , 1 , EDRAM , 1 , 0 , 3, 0 , 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_CCS_MEDIA_WRITABLE , 0 , EDRAM , 1 , EDRAM , 0, 0, 0, 0, NS, 0, 0, 0, SP );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_BRC_HISTORY_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_SOFTWARE_SCOREBOARD_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_ME_MV_DATA_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_MV_DISTORTION_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_4XME_DISTORTION_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_INTRA_DISTORTION_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_MB_STATS_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_PAK_STATS_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_PIC_STATE_READ_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_PIC_STATE_WRITE_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_COMBINED_ENC_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_BRC_CONSTANT_DATA_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_INTERMEDIATE_CU_RECORD_SURFACE_ENCODE , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_SCRATCH_ENCODE , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_LCU_LEVEL_DATA_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_ENC_HISTORY_INPUT_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_ENC_HISTORY_OUTPUT_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_DEBUG_ENCODE , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_ENC_CONSTANT_TABLE_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_ENC_CU_RECORD_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_ENC_MV_TEMPORAL_BUFFER_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_ENC_CU_PACKET_FOR_PAK_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_ENC_BCOMBINED1_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_ENC_BCOMBINED2_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_FRAME_STATS_STREAMOUT_DATA_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_DEBLOCKINGFILTER_ROWSTORE_TILE_LINE_BUFFER_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_DEBLOCKINGFILTER_ROWSTORE_TILE_COLUMN_BUFFER_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HCP_MD_TILE_LINE_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HCP_MD_TILE_COLUMN_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HCP_SAO_TILE_LINE_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HCP_SAO_TILE_COLUMN_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_VP9_PROBABILITY_COUNTER_BUFFER_CODEC , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HUC_VIRTUAL_ADDR_REGION_BUFFER_CODEC , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SIZE_STREAMOUT_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_COMPRESSED_HEADER_BUFFER_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_PROBABILITY_DELTA_BUFFER_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_TILE_RECORD_BUFFER_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_TILE_SIZE_STAS_BUFFER_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_MAD_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_PAK_IMAGESTATE_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_MBENC_BRC_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_MB_BRC_CONST_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_BRC_MB_QP_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_BRC_ROI_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_MBDISABLE_SKIPMAP_CODEC , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_SLICE_MAP_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_WP_DOWNSAMPLED_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_VDENC_IMAGESTATE_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
/**********************************************************************************/
//
// OCL Usages
//
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_BUFFER_CONST , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 1, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_BUFFER_CSR_UC , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_BUFFER_CACHELINE_MISALIGNED , 1 , 1 , 0 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_IMAGE , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_INLINE_CONST , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_INLINE_CONST_HDC , 1 , 1 , 1, 0 , 3 , 0, 0, 0, 0, 0, 0, 1, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_SCRATCH , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_PRIVATE_MEM , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_PRINTF_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_STATE_HEAP_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_SYSTEM_MEMORY_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_SYSTEM_MEMORY_BUFFER_CACHELINE_MISALIGNED , 1 , 1 , 0 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_ISH_HEAP_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_TAG_MEMORY_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_TEXTURE_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_SELF_SNOOP_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 3, 0, 0, RO );
/**********************************************************************************/
// Cross Adapter
DEFINE_CACHE_ELEMENT( GMM_RESOURCE_USAGE_XADAPTER_SHARED_RESOURCE , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
/**********************************************************************************/
// BCS
DEFINE_CACHE_ELEMENT( GMM_RESOURCE_USAGE_BLT_SOURCE , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
DEFINE_CACHE_ELEMENT( GMM_RESOURCE_USAGE_BLT_DESTINATION , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
/**********************************************************************************/
#include "GmmCachePolicyUndefineConditionals.h"

View File

@ -162,7 +162,7 @@ GMM_STATUS GmmLib::GmmGen8CachePolicy::SetupPAT()
{
GMM_PRIVATE_PAT PAT = {0};
if(pGmmGlobalContext->GetWaTable().WaNoMocsEllcOnly)
if(pGmmGlobalContext->GetWaTable().FtrMemTypeMocsDeferPAT)
{
GfxTargetCache = GMM_GFX_TC_ELLC_ONLY;
}
@ -225,7 +225,7 @@ GMM_STATUS GmmLib::GmmGen8CachePolicy::SetupPAT()
// For BDW-H, due to Perf issue, TC has to be eLLC only for Page Tables when eDRAM is present.
GfxMemType = GMM_GFX_WB;
if(pGmmGlobalContext->GetWaTable().WaNoMocsEllcOnly)
if(pGmmGlobalContext->GetWaTable().FtrMemTypeMocsDeferPAT)
{
GfxTargetCache = GMM_GFX_TC_ELLC_ONLY;
}
@ -285,10 +285,6 @@ GMM_STATUS GmmLib::GmmGen8CachePolicy::SetPATInitWA()
WA_TABLE * pWaTable = &const_cast<WA_TABLE &>(pGmmGlobalContext->GetWaTable());
#if(defined(__GMM_KMD__))
if(pGmmGlobalContext->GetGtSysInfoPtr()->EdramSizeInKb)
{
pWaTable->WaNoMocsEllcOnly = 1;
}
pWaTable->WaGttPat0 = 1;
pWaTable->WaGttPat0WB = 1;

View File

@ -290,7 +290,7 @@ GMM_STATUS GmmLib::GmmGen9CachePolicy::SetupPAT()
{
GMM_PRIVATE_PAT PAT = {0};
if(pGmmGlobalContext->GetWaTable().WaNoMocsEllcOnly)
if(pGmmGlobalContext->GetWaTable().FtrMemTypeMocsDeferPAT)
{
GfxTargetCache = GMM_GFX_TC_ELLC_ONLY;
}

View File

@ -283,6 +283,45 @@ GMM_SURFACESTATE_FORMAT GMM_STDCALL GmmLib::GmmClientContext::GetSurfaceStateFor
GMM_SURFACESTATE_FORMAT_INVALID;
}
/////////////////////////////////////////////////////////////////////////////////////
/// Member function of ClientContext class for returning
/// RENDER_SURFACE_STATE::CompressionFormat
///
/// @return uint8_t
/////////////////////////////////////////////////////////////////////////////////////
uint8_t GMM_STDCALL GmmLib::GmmClientContext::GetSurfaceStateCompressionFormat(GMM_RESOURCE_FORMAT Format)
{
__GMM_ASSERT((Format > GMM_FORMAT_INVALID) && (Format < GMM_RESOURCE_FORMATS));
return pGmmGlobalContext->GetPlatformInfo().FormatTable[Format].CompressionFormat.AuxL1eFormat;
}
/////////////////////////////////////////////////////////////////////////////////////
/// Member function of ClientContext class for returning
/// MEDIA_SURFACE_STATE::CompressionFormat
///
/// @return uint8_t
/////////////////////////////////////////////////////////////////////////////////////
uint8_t GMM_STDCALL GmmLib::GmmClientContext::GetMediaSurfaceStateCompressionFormat(GMM_RESOURCE_FORMAT Format)
{
__GMM_ASSERT((Format > GMM_FORMAT_INVALID) && (Format < GMM_RESOURCE_FORMATS));
return pGmmGlobalContext->GetPlatformInfoObj()->OverrideCompressionFormat(Format, (uint8_t)0x1);
}
/////////////////////////////////////////////////////////////////////////////////////
/// Member function of ClientContext class for returning E2E compression format
///
/// @return GMM_E2ECOMP_FORMAT
/////////////////////////////////////////////////////////////////////////////////////
GMM_E2ECOMP_FORMAT GMM_STDCALL GmmLib::GmmClientContext::GetLosslessCompressionType(GMM_RESOURCE_FORMAT Format)
{
// ToDo: Remove the definition of GmmGetLosslessCompressionType(Format)
__GMM_ASSERT((Format > GMM_FORMAT_INVALID) && (Format < GMM_RESOURCE_FORMATS));
return pGmmGlobalContext->GetPlatformInfo().FormatTable[Format].CompressionFormat.AuxL1eFormat;
}
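A minimal usage sketch of the three accessors added above (illustration only, not part of this change; it assumes the UMD already holds a valid GmmClientContext pointer, here called pClientContext, and GMM_FORMAT_R8G8B8A8_UNORM is just an example format):
// Sketch: querying the new compression-format accessors from a UMD.
void ProgramCompressionFields(GmmLib::GmmClientContext *pClientContext)
{
    GMM_RESOURCE_FORMAT Format = GMM_FORMAT_R8G8B8A8_UNORM; // example format only
    // RENDER_SURFACE_STATE::CompressionFormat (RC encoding from the format table)
    uint8_t RcFormat = pClientContext->GetSurfaceStateCompressionFormat(Format);
    // MEDIA_SURFACE_STATE::CompressionFormat (RC encoding overridden for MC)
    uint8_t McFormat = pClientContext->GetMediaSurfaceStateCompressionFormat(Format);
    // E2E lossless compression format
    GMM_E2ECOMP_FORMAT E2EFormat = pClientContext->GetLosslessCompressionType(Format);
    (void)RcFormat; (void)McFormat; (void)E2EFormat; // values would be programmed into HW surface state
}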
/////////////////////////////////////////////////////////////////////////////////////
/// Member function of ClientContext class to return InternalGpuVaMax value
/// stored in pGmmGlobalContext

View File

@ -510,4 +510,7 @@ GmmLib::PlatformInfoGen10::PlatformInfoGen10(PLATFORM &Platform)
Data.ReconMaxHeight = Data.Texture2DSurface.MaxHeight; // Reconstructed surfaces require more height and width for higher resolutions.
Data.ReconMaxWidth = Data.Texture2DSurface.MaxWidth;
Data.NoOfBitsSupported = 39;
Data.HighestAcceptablePhysicalAddress = GFX_MASK_LARGE(0, 38);
}

View File

@ -0,0 +1,425 @@
/*==============================================================================
Copyright(c) 2019 Intel Corporation
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files(the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and / or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
============================================================================*/
#include "Internal/Common/GmmLibInc.h"
#include "Internal/Common/Platform/GmmGen12Platform.h"
/************************ RT->CCS Sizing definitions ************************
H/V/D Align and Downscale factor to obtain CCS from given RT dimensions
Restrictions:
CCS's RT (2D/3D YF) alignment to 4x1 (2D/3D YF) pages sharing 1x1 Aux$line
(2D YS) 2x2 ( 2D YF) pages "
(3D YS) 2x1x2 ( 3D YF) pages " ie Slices share Aux$line
(Color MSAA'd YF) 4x1 (MSAA'dYF) pages " ie all samples share Aux$line (Samples are array'd ie YF 4KB = YF-MSAA x MSAA-Samples)
(Color MSAA 2x/4x YS) 2x2x1 ( 2D YF) pages " ie Single sample per Aux$line
(Color MSAA 8x YS) 1x2x2 ( 2D YF) pages " ie 2 samples share Aux$line
(Color MSAA 16x YS) 1x1x4 ( 2D YF) pages " ie 4 samples share Aux$line
(Depth MSAA YF) 4x1 ( 2D YF) pages " ie all samples share Aux$line
(Depth MSAA 2x/4x YS) 2x2x1 ( 2D YF) pages " ie Single sample per Aux$line
(Depth MSAA 8x YS) 1x2x2 ( 2D YF) pages " ie 2 samples share Aux$line
(Depth MSAA 16x YS) 1x1x4 ( 2D YF) pages " ie 4 samples share Aux$line
ie Depth/Color MSAA have common alignment, but due to different pixel packing (Depth MSS is interleaved, Color MSS is arrayed)
SamplePerAux$line samples are X-major (for Depth), while Y-major (for Color) packed ie For Depth MSAA, Hdownscale *=SamplePerAux$line;
for color MSAA, Vdownscale = Vdownscale; for both, MSAA-samples/SamplePerAux$line times sample shared CCS-size
HAlign: Horizontal Align in pixels
VAlign: Vertical Align in pixels
DAlign: Depth Align in pixels
HAlignxVAlignxDAlign [RT size] occupies one Aux$line
SamplesPerAux$line: Samples sharing CCS; NSamples divisor on MSAA-samples giving multiple (on shared CCS) to cover all samples
HDownscale: width divisor on CCSRTAlign`d width
VDownscale: height divisor on CCSRTAlign`d height
Convention:
(+ve) HDownscale/VDownscale are downscale factors, and used as divisors
(-ve) HDownscale/VDownscale are upscale factors, their absolute value used as multipliers
ie if HDownscale etc is smaller than 1, its reciprocal is stored with -ve sign
<---- CCSRTALIGN -----> <-- RT->CCS downscale-->
( TileMode, HAlign , VAlign, DAlign, HDownscale, VDownscale)
or
SamplesPerAux$line,
eg:
CCSRTALIGN(TILE_YF_2D_8bpe, 256, 64, 1, 16, 16 )
**********************************************************************************************************/
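To make the sign convention concrete, here is a small sketch (not library code; the helper name ApplyCcsScale is hypothetical) of how a CCSRTALIGN entry maps render-target dimensions to a CCS footprint:
// Sketch: apply one CCSRTALIGN entry. Positive Downscale values act as divisors,
// negative values are upscale factors (their absolute value is used as a multiplier).
static uint32_t ApplyCcsScale(uint32_t AlignedDim, int32_t Scale)
{
    return (Scale > 0) ? (AlignedDim / Scale) : (AlignedDim * static_cast<uint32_t>(-Scale));
}
// Example with CCSRTALIGN(TILE_YF_2D_8bpe, 256, 64, 1, 16, 16): a 1920x1080 RT
// aligns to 2048x1088, giving ApplyCcsScale(2048, 16) x ApplyCcsScale(1088, 16)
// = 128 x 68 CCS units after the downscale.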
/////////////////////////////////////////////////////////////////////////////////////
/// This function will initialize the necessary info based on the platform.
/// - Buffer type restrictions (Eg: Z, Color, Display)
/// - X/Y tile dimensions
///
/// @param[in] Platform: Contains information about platform to initialize an object
/////////////////////////////////////////////////////////////////////////////////////
GmmLib::PlatformInfoGen12::PlatformInfoGen12(PLATFORM &Platform)
: PlatformInfoGen11(Platform)
{
__GMM_ASSERTPTR(pGmmGlobalContext, VOIDRETURN);
//Compression format update
GMM_RESOURCE_FORMAT GmmFormat;
#define GMM_FORMAT_SKU(FtrXxx) (pGmmGlobalContext->GetSkuTable().FtrXxx != 0)
#define GMM_COMPR_FORMAT_INVALID (static_cast<uint8_t>(GMM_E2ECOMP_FORMAT_INVALID))
#define GMM_FORMAT(Name, bpe, _Width, _Height, _Depth, IsRT, IsASTC, RcsSurfaceFormat, SSCompressionFmt, Availability) \
\
{ \
GmmFormat = GMM_FORMAT_##Name; \
Data.FormatTable[GmmFormat].CompressionFormat.CompressionFormat = static_cast<uint8_t>(SSCompressionFmt); \
}
#include "External/Common/GmmFormatTable.h"
// --------------------------
// Surface Alignment Units
// --------------------------
// 3DSTATE_DEPTH_BUFFER
//======================================================================
// Surf Format | MSAA | HAlign | VAlign |
//======================================================================
// D16_UNORM | 1x, 4x, 16x | 8 | 8 |
// D16_UNORM | 2x, 8x | 16 | 4 |
// Not D16_UNORM | 1x,2x,4x,8x,16x | 8 | 4 |
//======================================================================
// 3DSTATE_STENCIL_BUFFER
//======================================================================
// Surf Format | MSAA | HAlign | VAlign |
//======================================================================
// N/A | N/A | 16 | 8 |
//======================================================================
Data.SurfaceMaxSize = GMM_GBYTE(16384);
Data.MaxGpuVirtualAddressBitsPerResource = 44;
//Override the VP9 VdEnc recon height/width requirement for Gen12 16k resolution.
Data.ReconMaxHeight = GMM_KBYTE(48);
Data.ReconMaxWidth = GMM_KBYTE(32);
Data.TexAlign.Depth.Width = 8; // Not D16_UNORM
Data.TexAlign.Depth.Height = 4;
Data.TexAlign.Depth_D16_UNORM_1x_4x_16x.Width = 8;
Data.TexAlign.Depth_D16_UNORM_1x_4x_16x.Height = 8;
Data.TexAlign.Depth_D16_UNORM_2x_8x.Width = 16;
Data.TexAlign.Depth_D16_UNORM_2x_8x.Height = 4;
Data.TexAlign.SeparateStencil.Width = 16;
Data.TexAlign.SeparateStencil.Height = 8;
//CCS unit size ie cacheline
Data.TexAlign.CCS.Align.Width = 16;
Data.TexAlign.CCS.Align.Height = 4;
Data.TexAlign.CCS.Align.Depth = 1;
Data.TexAlign.CCS.MaxPitchinTiles = 1024;
// clang-format off
//Extended CCS alignment for per bpp/Tiling CCS alignment
#define CCSRTALIGN(TileMode, HAlign, VAlign, DAlign, HDownscale, VDownscale) \
{ \
TexAlignEx.CCSEx[CCS_MODE(TileMode)].Align.Width = HAlign; \
TexAlignEx.CCSEx[CCS_MODE(TileMode)].Align.Height = VAlign; \
TexAlignEx.CCSEx[CCS_MODE(TileMode)].Align.Depth = DAlign; \
TexAlignEx.CCSEx[CCS_MODE(TileMode)].Downscale.Width = HDownscale; \
TexAlignEx.CCSEx[CCS_MODE(TileMode)].Downscale.Height = VDownscale; \
TexAlignEx.CCSEx[CCS_MODE(TileMode)].Downscale.Depth = DAlign; \
}
// clang-format off
//See "RT->CCS Sizing definitions" comments above for explanation on fields
/********* TileMode HAlign, VAlign, DAlign, HDownscale, VDownscale ***/
CCSRTALIGN(TILE_YF_2D_8bpe, 256, 64, 1, 16, 16 );
CCSRTALIGN(TILE_YF_2D_16bpe, 256, 32, 1, 16, 8 );
CCSRTALIGN(TILE_YF_2D_32bpe, 128, 32, 1, 8, 8 );
CCSRTALIGN(TILE_YF_2D_64bpe, 128, 16, 1, 8, 4 );
CCSRTALIGN(TILE_YF_2D_128bpe, 64, 16, 1, 4, 4 );
CCSRTALIGN(TILE_YF_3D_8bpe, 64, 16, 16, 4, 4 );
CCSRTALIGN(TILE_YF_3D_16bpe, 32, 16, 16, 2, 4 );
CCSRTALIGN(TILE_YF_3D_32bpe, 32, 16, 8, 2, 4 );
CCSRTALIGN(TILE_YF_3D_64bpe, 32, 8, 8, 2, 2 );
CCSRTALIGN(TILE_YF_3D_128bpe, 16, 8, 8, 1, 2 );
CCSRTALIGN(TILE_YF_2D_2X_8bpe, 128, 64, 2, 8, 16 );
CCSRTALIGN(TILE_YF_2D_2X_16bpe, 128, 32, 2, 8, 8 );
CCSRTALIGN(TILE_YF_2D_2X_32bpe, 64, 32, 2, 4, 8 );
CCSRTALIGN(TILE_YF_2D_2X_64bpe, 64, 16, 2, 4, 4 );
CCSRTALIGN(TILE_YF_2D_2X_128bpe, 32, 16, 2, 2, 4 );
CCSRTALIGN(TILE_YF_2D_4X_8bpe, 128, 32, 4, 8, 8 );
CCSRTALIGN(TILE_YF_2D_4X_16bpe, 128, 16, 4, 8, 4 );
CCSRTALIGN(TILE_YF_2D_4X_32bpe, 64, 16, 4, 4, 4 );
CCSRTALIGN(TILE_YF_2D_4X_64bpe, 64, 8, 4, 4, 2 );
CCSRTALIGN(TILE_YF_2D_4X_128bpe, 32, 8, 4, 2, 2 );
CCSRTALIGN(TILE_YF_2D_8X_8bpe, 64, 32, 8, 4, 8 );
CCSRTALIGN(TILE_YF_2D_8X_16bpe, 64, 16, 8, 4, 4 );
CCSRTALIGN(TILE_YF_2D_8X_32bpe, 32, 16, 8, 2, 4 );
CCSRTALIGN(TILE_YF_2D_8X_64bpe, 32, 8, 8, 2, 2 );
CCSRTALIGN(TILE_YF_2D_8X_128bpe, 16, 8, 8, 1, 2 );
CCSRTALIGN(TILE_YF_2D_16X_8bpe, 64, 16, 16, 4, 4 );
CCSRTALIGN(TILE_YF_2D_16X_16bpe, 64, 8, 16, 4, 2 );
CCSRTALIGN(TILE_YF_2D_16X_32bpe, 32, 8, 16, 2, 2 );
CCSRTALIGN(TILE_YF_2D_16X_64bpe, 32, 4, 16, 2, 1 );
CCSRTALIGN(TILE_YF_2D_16X_128bpe, 16, 4, 16, 1, 1 );
CCSRTALIGN(TILE_YS_2D_8bpe, 128, 128, 1, 8, 32 );
CCSRTALIGN(TILE_YS_2D_16bpe, 128, 64, 1, 8, 16 );
CCSRTALIGN(TILE_YS_2D_32bpe, 64, 64, 1, 4, 16 );
CCSRTALIGN(TILE_YS_2D_64bpe, 64, 32, 1, 4, 8 );
CCSRTALIGN(TILE_YS_2D_128bpe, 32, 32, 1, 2, 8 );
CCSRTALIGN(TILE_YS_3D_8bpe, 32, 16, 32, 2, 4 );
CCSRTALIGN(TILE_YS_3D_16bpe, 16, 16, 32, 1, 4 );
CCSRTALIGN(TILE_YS_3D_32bpe, 16, 16, 16, 1, 4 );
CCSRTALIGN(TILE_YS_3D_64bpe, 16, 8, 16, 1, 2 );
CCSRTALIGN(TILE_YS_3D_128bpe, 8, 8, 16, -2, 2 );
CCSRTALIGN(TILE_YS_2D_2X_8bpe, 128, 128, 1, 8, 32 );
CCSRTALIGN(TILE_YS_2D_2X_16bpe, 128, 64, 1, 8, 16 );
CCSRTALIGN(TILE_YS_2D_2X_32bpe, 64, 64, 1, 4, 16 );
CCSRTALIGN(TILE_YS_2D_2X_64bpe, 64, 32, 1, 4, 8 );
CCSRTALIGN(TILE_YS_2D_2X_128bpe, 32, 32, 1, 2, 8 );
CCSRTALIGN(TILE_YS_2D_4X_8bpe, 128, 128, 1, 8, 32 );
CCSRTALIGN(TILE_YS_2D_4X_16bpe, 128, 64, 1, 8, 16 );
CCSRTALIGN(TILE_YS_2D_4X_32bpe, 64, 64, 1, 4, 16 );
CCSRTALIGN(TILE_YS_2D_4X_64bpe, 64, 32, 1, 4, 8 );
CCSRTALIGN(TILE_YS_2D_4X_128bpe, 32, 32, 1, 2, 8 );
CCSRTALIGN(TILE_YS_2D_8X_8bpe, 64, 128, 2, 4, 32 );
CCSRTALIGN(TILE_YS_2D_8X_16bpe, 64, 64, 2, 4, 16 );
CCSRTALIGN(TILE_YS_2D_8X_32bpe, 32, 64, 2, 2, 16 );
CCSRTALIGN(TILE_YS_2D_8X_64bpe, 32, 32, 2, 2, 8 );
CCSRTALIGN(TILE_YS_2D_8X_128bpe, 16, 32, 2, 1, 8 );
CCSRTALIGN(TILE_YS_2D_16X_8bpe, 64, 64, 4, 4, 16 );
CCSRTALIGN(TILE_YS_2D_16X_16bpe, 64, 32, 4, 4, 8 );
CCSRTALIGN(TILE_YS_2D_16X_32bpe, 32, 32, 4, 2, 8 );
CCSRTALIGN(TILE_YS_2D_16X_64bpe, 32, 16, 4, 2, 4 );
CCSRTALIGN(TILE_YS_2D_16X_128bpe, 16, 16, 4, 1, 4 );
#undef CCSRTALIGN
// clang-format on
#define FCRECTALIGN(TileMode, bpp, HAlign, VAlign, HDownscale, VDownscale) \
{ \
FCTileMode[FCMode(TileMode, bpp)].Align.Width = HAlign; \
FCTileMode[FCMode(TileMode, bpp)].Align.Height = VAlign; \
FCTileMode[FCMode(TileMode, bpp)].Align.Depth = 1; \
FCTileMode[FCMode(TileMode, bpp)].Downscale.Width = HDownscale; \
FCTileMode[FCMode(TileMode, bpp)].Downscale.Height = VDownscale; \
FCTileMode[FCMode(TileMode, bpp)].Downscale.Depth = 1; \
}
// clang-format off
FCRECTALIGN(LEGACY_TILE_Y , 8, 512, 32, 256, 16);
FCRECTALIGN(LEGACY_TILE_Y , 16, 256, 32, 128, 16);
FCRECTALIGN(LEGACY_TILE_Y , 32, 128, 32, 64, 16);
FCRECTALIGN(LEGACY_TILE_Y , 64, 64, 32, 32, 16);
FCRECTALIGN(LEGACY_TILE_Y , 128, 32, 32, 16, 16);
FCRECTALIGN(TILE_YF_2D_8bpe , 8, 256, 64, 128, 32);
FCRECTALIGN(TILE_YF_2D_16bpe , 16, 256, 32, 128, 16);
FCRECTALIGN(TILE_YF_2D_32bpe , 32, 128, 32, 64, 16);
FCRECTALIGN(TILE_YF_2D_64bpe , 64, 128, 16, 64, 8);
FCRECTALIGN(TILE_YF_2D_128bpe, 128, 64, 16, 32, 8);
FCRECTALIGN(TILE_YS_2D_8bpe , 8, 128, 128, 64, 64);
FCRECTALIGN(TILE_YS_2D_16bpe , 16, 128, 64, 64, 32);
FCRECTALIGN(TILE_YS_2D_32bpe , 32, 64, 64, 32, 32);
FCRECTALIGN(TILE_YS_2D_64bpe , 64, 64, 32, 32, 16);
FCRECTALIGN(TILE_YS_2D_128bpe, 128, 32, 32, 16, 16);
#undef FCRECTALIGN
// clang-format on
Data.NoOfBitsSupported = 39;
Data.HighestAcceptablePhysicalAddress = GFX_MASK_LARGE(0, 38);
}
void GmmLib::PlatformInfoGen12::ApplyExtendedTexAlign(uint32_t CCSMode, ALIGNMENT &UnitAlign)
{
if(CCSMode < CCS_MODES)
{
UnitAlign.Width = TexAlignEx.CCSEx[CCSMode].Align.Width;
UnitAlign.Height = TexAlignEx.CCSEx[CCSMode].Align.Height;
UnitAlign.Depth = TexAlignEx.CCSEx[CCSMode].Align.Depth;
}
}
/////////////////////////////////////////////////////////////////////////////////////
/// Sets the CCS flag when the client requests media memory compression (MMC).
///
/// @param[in] Flags: Resource flags to update
/////////////////////////////////////////////////////////////////////////////////////
void GmmLib::PlatformInfoGen12::SetCCSFlag(GMM_RESOURCE_FLAG &Flags)
{
if(Flags.Gpu.MMC)
{
Flags.Gpu.CCS = Flags.Gpu.MMC;
}
}
/////////////////////////////////////////////////////////////////////////////////////
/// Validates the MMC parameters passed in by clients to make sure they do not
/// conflict or ask for unsupported combinations/features.
///
/// @param[in] GMM_TEXTURE_INFO which specifies what sort of resource to create
/// @return 1 if validation passed. 0 otherwise.
/////////////////////////////////////////////////////////////////////////////////////
uint8_t GmmLib::PlatformInfoGen12::ValidateMMC(GMM_TEXTURE_INFO &Surf)
{
if(Surf.Flags.Gpu.MMC && //For Media Memory Compression --
(!(GMM_IS_4KB_TILE(Surf.Flags) || GMM_IS_64KB_TILE(Surf.Flags)) &&
(!Surf.Flags.Gpu.__NonMsaaLinearCCS)))
{
return 0;
}
return 1;
}
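For illustration, the rule above expressed in flags (a sketch only, assuming GMM_IS_4KB_TILE corresponds to the TiledY flag used elsewhere in this file):
// Sketch: MMC validation outcomes.
GMM_TEXTURE_INFO MmcSurf = {};
MmcSurf.Flags.Gpu.MMC     = 1;
MmcSurf.Flags.Info.TiledY = 1;  // 4KB-tiled -> ValidateMMC() returns 1
// A linear MMC surface would additionally need Flags.Gpu.__NonMsaaLinearCCS set;
// otherwise ValidateMMC() returns 0.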
/////////////////////////////////////////////////////////////////////////////////////
/// Validates the parameters passed in by clients to make sure they do not
/// conflict or ask for unsupported combinations/features.
///
/// @param[in] GMM_TEXTURE_INFO which specifies what sort of resource to create
/// @return 1 if validation passed. 0 otherwise.
/////////////////////////////////////////////////////////////////////////////////////
uint8_t GmmLib::PlatformInfoGen12::ValidateCCS(GMM_TEXTURE_INFO &Surf)
{
if(!( //--- Legitimate CCS Case ----------------------------------------
((Surf.Type >= RESOURCE_2D && Surf.Type <= RESOURCE_BUFFER) && ////Not supported: 1D; Supported: Buffer, 2D, 3D, cube, Arrays, mip-maps, MSAA, Depth/Stencil
(!(Surf.Flags.Info.RenderCompressed || Surf.Flags.Info.MediaCompressed) || //Not compressed surface eg separate Aux Surf
(GMM_IS_4KB_TILE(Surf.Flags) || GMM_IS_64KB_TILE(Surf.Flags)) || //Only on Y/Ys
(Surf.Flags.Info.Linear && Surf.Type == RESOURCE_BUFFER && //Machine-Learning compression on untyped linear buffer
Surf.Flags.Info.RenderCompressed)))))
{
GMM_ASSERTDPF(0, "Invalid CCS usage!");
return 0;
}
//Compressed resource (main surf) must pre-define MC/RC type
if(!(Surf.Flags.Gpu.__NonMsaaTileYCcs || Surf.Flags.Gpu.__NonMsaaLinearCCS) &&
!Surf.Flags.Gpu.ProceduralTexture &&
!(Surf.Flags.Info.RenderCompressed || Surf.Flags.Info.MediaCompressed))
{
GMM_ASSERTDPF(0, "Invalid CCS usage - RC/MC type unspecified!");
return 0;
}
if(Surf.Flags.Info.RenderCompressed && Surf.Flags.Info.MediaCompressed)
{
GMM_ASSERTDPF(0, "Invalid CCS usage - can't be both RC and MC!");
return 0;
}
if(!pGmmGlobalContext->GetSkuTable().FtrLinearCCS &&
(Surf.Type == RESOURCE_3D || Surf.MaxLod > 0 || Surf.MSAA.NumSamples > 1 ||
!(Surf.Flags.Info.TiledYf || GMM_IS_64KB_TILE(Surf.Flags))))
{
GMM_ASSERTDPF(0, "CCS support for (volumetric, mip'd, MSAA'd, TileY) resources only enabled with Linear CCS!");
return 0;
}
GMM_ASSERTDPF((Surf.Flags.Wa.PreGen12FastClearOnly == 0), "FastClear Only unsupported on Gen12+!");
Surf.Flags.Wa.PreGen12FastClearOnly = 0;
return 1;
}
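As a rough illustration of the checks above, a flag combination that would pass for a Gen12 render-compressed target (a sketch only; field names follow GMM_TEXTURE_INFO as used in this function):
// Sketch: render-compressed, 64KB-tiled 2D render target that satisfies ValidateCCS.
GMM_TEXTURE_INFO Surf = {};
Surf.Type                        = RESOURCE_2D;
Surf.Flags.Gpu.CCS               = 1;  // CCS aux requested
Surf.Flags.Gpu.RenderTarget      = 1;
Surf.Flags.Gpu.UnifiedAuxSurface = 1;  // also satisfies ValidateUnifiedAuxSurface() below (single-sample RT)
Surf.Flags.Info.TiledYs          = 1;  // 64KB tile (GMM_IS_64KB_TILE)
Surf.Flags.Info.RenderCompressed = 1;  // RC only; also setting MediaCompressed would be rejected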
/////////////////////////////////////////////////////////////////////////////////////
/// Validates the UnifiedAuxSurface parameters passed in by clients to make sure they do not
/// conflict or ask for unsupported combinations/features.
///
/// @param[in] GMM_TEXTURE_INFO which specifies what sort of resource to create
/// @return 1 if validation passed. 0 otherwise.
/////////////////////////////////////////////////////////////////////////////////////
uint8_t GmmLib::PlatformInfoGen12::ValidateUnifiedAuxSurface(GMM_TEXTURE_INFO &Surf)
{
if((Surf.Flags.Gpu.UnifiedAuxSurface) &&
!( //--- Legitimate UnifiedAuxSurface Case ------------------------------------------
Surf.Flags.Gpu.CCS &&
((Surf.MSAA.NumSamples <= 1 && (Surf.Flags.Gpu.RenderTarget || Surf.Flags.Gpu.Texture)) ||
((Surf.Flags.Gpu.Depth || Surf.Flags.Gpu.SeparateStencil || Surf.MSAA.NumSamples > 1)))))
{
GMM_ASSERTDPF(0, "Invalid UnifiedAuxSurface usage!");
return 0;
}
return 1;
}
//=============================================================================
//
// Function: CheckFmtDisplayDecompressible
//
// Desc: Returns true if display HW supports lossless render/media decompression,
//       else returns false. Restrictions come from the display capability flags
//       passed in by the caller. UMDs can call it to decide if a full resolve is required.
//
// Parameters:
// See function arguments.
//
// Returns:
// uint8_t
//-----------------------------------------------------------------------------
uint8_t GmmLib::PlatformInfoGen12::CheckFmtDisplayDecompressible(GMM_TEXTURE_INFO &Surf,
bool IsSupportedRGB64_16_16_16_16,
bool IsSupportedRGB32_8_8_8_8,
bool IsSupportedRGB32_2_10_10_10,
bool IsSupportedMediaFormats)
{
//Check fmt is display decompressible
if(((Surf.Flags.Info.RenderCompressed || Surf.Flags.Info.MediaCompressed) &&
(IsSupportedRGB64_16_16_16_16 || //RGB64 16:16 : 16 : 16 FP16
IsSupportedRGB32_8_8_8_8 || //RGB32 8 : 8 : 8 : 8
IsSupportedRGB32_2_10_10_10)) || //RGB32 2 : 10 : 10 : 10) ||
(Surf.Flags.Info.MediaCompressed && IsSupportedMediaFormats)) //YUV444 - Y412, Y416
{
//Display supports compression on TileY, but not Yf/Ys (deprecated for display support)
if(GMM_IS_4KB_TILE(Surf.Flags) &&
!(Surf.Flags.Info.TiledYf || GMM_IS_64KB_TILE(Surf.Flags)))
{
return true;
}
}
return false;
}
//=============================================================================
//
// Function: OverrideCompressionFormat
//
// Desc: SurfaceState compression format encoding differs for MC vs RC on a few formats.
//       This function overrides the default RC encoding for MC requests
//
// Parameters:
// See function arguments.
//
// Returns:
// uint8_t
//-----------------------------------------------------------------------------
uint8_t GmmLib::PlatformInfoGen12::OverrideCompressionFormat(GMM_RESOURCE_FORMAT Format, uint8_t IsMC)
{
return Data.FormatTable[Format].CompressionFormat.CompressionFormat;
}

View File

@ -494,4 +494,7 @@ GmmLib::PlatformInfoGen9::PlatformInfoGen9(PLATFORM &Platform)
Data.ReconMaxHeight = Data.Texture2DSurface.MaxHeight; // Reconstructed surfaces require more height and width for higher resolutions.
Data.ReconMaxWidth = Data.Texture2DSurface.MaxWidth;
Data.NoOfBitsSupported = 39;
Data.HighestAcceptablePhysicalAddress = GFX_MASK_LARGE(0, 38);
}

View File

@ -37,23 +37,24 @@ GmmLib::PlatformInfo::PlatformInfo(PLATFORM &Platform)
#define GMM_FORMAT_GEN(X) (GFX_GET_CURRENT_RENDERCORE(Data.Platform) >= IGFX_GEN##X##_CORE)
#define GMM_FORMAT_SKU(FtrXxx) (pGmmGlobalContext->GetSkuTable().FtrXxx != 0)
#define GMM_FORMAT_WA(WaXxx) (pGmmGlobalContext->GetWaTable().WaXxx != 0)
#define GMM_FORMAT(Name, bpe, _Width, _Height, _Depth, IsRT, IsASTC, RcsSurfaceFormat, AuxL1Format, Availability) \
\
{ \
GmmFormat = GMM_FORMAT_##Name; \
Data.FormatTable[GmmFormat].ASTC = (IsASTC); \
Data.FormatTable[GmmFormat].Element.BitsPer = (bpe); \
Data.FormatTable[GmmFormat].Element.Depth = (_Depth); \
Data.FormatTable[GmmFormat].Element.Height = (_Height); \
Data.FormatTable[GmmFormat].Element.Width = (_Width); \
Data.FormatTable[GmmFormat].RenderTarget = ((IsRT) != 0); \
Data.FormatTable[GmmFormat].SurfaceStateFormat = ((GMM_SURFACESTATE_FORMAT)(RcsSurfaceFormat)); \
Data.FormatTable[GmmFormat].Reserved = ((uint32_t)(AuxL1Format)); \
Data.FormatTable[GmmFormat].Supported = ((Availability) != 0); \
if(((_Depth) > 1) || ((_Height) > 1) || ((_Width) > 1)) \
{ \
Data.FormatTable[GmmFormat].Compressed = 1; \
} \
#define GMM_COMPR_FORMAT_INVALID GMM_E2ECOMP_FORMAT_INVALID
#define GMM_FORMAT(Name, bpe, _Width, _Height, _Depth, IsRT, IsASTC, RcsSurfaceFormat, SSCompressionFmt, Availability) \
\
{ \
GmmFormat = GMM_FORMAT_##Name; \
Data.FormatTable[GmmFormat].ASTC = (IsASTC); \
Data.FormatTable[GmmFormat].Element.BitsPer = (bpe); \
Data.FormatTable[GmmFormat].Element.Depth = (_Depth); \
Data.FormatTable[GmmFormat].Element.Height = (_Height); \
Data.FormatTable[GmmFormat].Element.Width = (_Width); \
Data.FormatTable[GmmFormat].RenderTarget = ((IsRT) != 0); \
Data.FormatTable[GmmFormat].SurfaceStateFormat = ((GMM_SURFACESTATE_FORMAT)(RcsSurfaceFormat)); \
Data.FormatTable[GmmFormat].CompressionFormat.CompressionFormat = (SSCompressionFmt); \
Data.FormatTable[GmmFormat].Supported = ((Availability) != 0); \
if(((_Depth) > 1) || ((_Height) > 1) || ((_Width) > 1)) \
{ \
Data.FormatTable[GmmFormat].Compressed = 1; \
} \
}
#include "External/Common/GmmFormatTable.h"

View File

@ -53,6 +53,13 @@ namespace GmmLib {
GMM_UNREFERENCED_PARAMETER(UnitAlign);
}
virtual uint8_t OverrideCompressionFormat(GMM_RESOURCE_FORMAT Format, uint8_t IsMC)
{
GMM_UNREFERENCED_PARAMETER(Format);
GMM_UNREFERENCED_PARAMETER(IsMC);
return 0;
}
void SetDataSurfaceMaxSize(uint64_t Size)
{
Data.SurfaceMaxSize = Size;

View File

@ -147,6 +147,7 @@ bool GmmLib::GmmResourceInfoCommon::CopyClientParams(GMM_RESCREATE_PARAMS &Creat
{
//GMM_ASSERTDPF(Surf.Flags.Gpu.HiZ, "Lossless Z compression supported when Depth+HiZ+CCS is unified");
AuxSecSurf = Surf;
AuxSecSurf.Type = AuxSecSurf.Type;
Surf.Flags.Gpu.HiZ = 0; //Its depth buffer, so clear HiZ
AuxSecSurf.Flags.Gpu.HiZ = 0;
AuxSurf.Flags.Gpu.IndirectClearColor = 0; //Clear Depth flags from HiZ, contained with separate/legacy HiZ when Depth isn't compressible.
@ -163,17 +164,24 @@ bool GmmLib::GmmResourceInfoCommon::CopyClientParams(GMM_RESCREATE_PARAMS &Creat
return false;
}
Surf.Flags.Gpu.CCS = 1;
AuxSurf.Type = AuxSurf.Type;
}
else if(Surf.MSAA.NumSamples > 1 && Surf.Flags.Gpu.CCS) //MSAA+MCS+CCS
{
GMM_ASSERTDPF(Surf.Flags.Gpu.MCS, "Lossless MSAA supported when MSAA+MCS+CCS is unified");
AuxSecSurf = Surf;
AuxSecSurf.Type = AuxSecSurf.Type;
AuxSecSurf.Flags.Gpu.MCS = 0;
AuxSurf.Flags.Gpu.CCS = 0;
AuxSurf.Flags.Info.RenderCompressed = AuxSurf.Flags.Info.MediaCompressed = 0;
}
else if(Surf.Flags.Gpu.CCS)
{
AuxSurf.Type = AuxSurf.Type;
}
if(GMM_SUCCESS != pTextureCalc->PreProcessTexSpecialCases(&AuxSurf))
if(AuxSurf.Type != RESOURCE_INVALID &&
GMM_SUCCESS != pTextureCalc->PreProcessTexSpecialCases(&AuxSurf))
{
return false;
}
@ -346,6 +354,13 @@ uint8_t GMM_STDCALL GmmLib::GmmResourceInfoCommon::ValidateParams()
goto ERROR_CASE;
}
if((GFX_GET_CURRENT_RENDERCORE(pPlatformResource->Platform) > IGFX_GEN11_CORE) &&
Surf.Flags.Info.TiledW)
{
GMM_ASSERTDPF(0, "Flag not supported on this platform.");
goto ERROR_CASE;
}
if((GFX_GET_CURRENT_RENDERCORE(pPlatformResource->Platform) < IGFX_GEN9_CORE) &&
#if(_DEBUG || _RELEASE_INTERNAL)
!pGmmGlobalContext->GetWaTable().WaDisregardPlatformChecks &&
@ -390,7 +405,7 @@ uint8_t GMM_STDCALL GmmLib::GmmResourceInfoCommon::ValidateParams()
}
}
GetRestrictions(Restrictions);
pTextureCalc->GetResRestrictions(&Surf, Restrictions);
// Check array size to make sure it meets HW limits
if((Surf.ArraySize > Restrictions.MaxArraySize) &&
@ -529,9 +544,10 @@ uint8_t GMM_STDCALL GmmLib::GmmResourceInfoCommon::ValidateParams()
// IndirectClearColor Restrictions
if((Surf.Flags.Gpu.IndirectClearColor) &&
!( //--- Legitimate IndirectClearColor Case ------------------------------------------
(GFX_GET_CURRENT_RENDERCORE(pPlatformResource->Platform) >= IGFX_GEN9_CORE) &&
Surf.Flags.Gpu.UnifiedAuxSurface
))
((GFX_GET_CURRENT_RENDERCORE(pPlatformResource->Platform) >= IGFX_GEN9_CORE) &&
Surf.Flags.Gpu.UnifiedAuxSurface) ||
((GFX_GET_CURRENT_RENDERCORE(pPlatformResource->Platform) > IGFX_GEN11_CORE) &&
(Surf.Flags.Gpu.HiZ || Surf.Flags.Gpu.SeparateStencil))))
{
GMM_ASSERTDPF(0, "Invalid IndirectClearColor usage!");
goto ERROR_CASE;
@ -667,6 +683,7 @@ uint8_t GMM_STDCALL GmmLib::GmmResourceInfoCommon::GetDisplayCompressionSupport(
case GMM_FORMAT_B10G10R10A2_UINT:
case GMM_FORMAT_B10G10R10A2_UNORM_SRGB:
case GMM_FORMAT_B10G10R10A2_USCALED:
case GMM_FORMAT_R10G10B10_FLOAT_A2_UNORM:
case GMM_FORMAT_R10G10B10_SNORM_A2_UNORM:
case GMM_FORMAT_R10G10B10A2_SINT:
case GMM_FORMAT_R10G10B10A2_SNORM:

View File

@ -577,7 +577,7 @@ void GmmLib::GmmTextureCalc::GetResRestrictions(GMM_TEXTURE_INFO * pTexinfo,
if(pTexinfo->Flags.Info.RenderCompressed ||
pTexinfo->Flags.Info.MediaCompressed)
{
Restrictions.Alignment = GFX_ALIGN(Restrictions.Alignment, GMM_KBYTE(16));
Restrictions.Alignment = GFX_ALIGN(Restrictions.Alignment, GMM_KBYTE(64));
}
GMM_DPF_EXIT;

View File

@ -395,6 +395,13 @@ GMM_STATUS GMM_STDCALL GmmLib::GmmGen10TextureCalc::FillTex2D(GMM_TEXTURE_INFO *
AlignedWidth = __GMM_EXPAND_WIDTH(this, Width, HAlign, pTexInfo);
// For Non - planar surfaces, the alignment is done on the entire height of the allocation
if(pGmmGlobalContext->GetWaTable().WaAlignYUVResourceToLCU &&
GmmIsYUVFormatLCUAligned(pTexInfo->Format))
{
AlignedWidth = GFX_ALIGN(AlignedWidth, GMM_SCANLINES(GMM_MAX_LCU_SIZE));
}
// Calculate special pitch case of small dimensions where LOD1 + LOD2 widths
// are greater than LOD0. e.g. dimensions 4x4 and MinPitch == 1
if((pTexInfo->Flags.Info.TiledYf || pTexInfo->Flags.Info.TiledYs) &&
@ -527,11 +534,7 @@ GMM_STATUS GMM_STDCALL GmmLib::GmmGen10TextureCalc::FillTexPlanar(GMM_TEXTURE_IN
__GMM_ASSERTPTR(pTexInfo, GMM_ERROR);
__GMM_ASSERTPTR(pRestrictions, GMM_ERROR);
__GMM_ASSERT(!pTexInfo->Flags.Info.TiledW);
// Client should always give us linear-fallback option for planar surfaces,
// except for MMC surfaces, which are TileY.
//__GMM_ASSERT(pTexInfo->Flags.Info.Linear || pTexInfo->Flags.Gpu.MMC);
pTexInfo->Flags.Info.Linear = 1;
pTexInfo->TileMode = TILE_NONE;
pTexInfo->TileMode = TILE_NONE;
const GMM_PLATFORM_INFO *pPlatform = GMM_OVERRIDE_PLATFORM_INFO(pTexInfo);
@ -569,10 +572,7 @@ GMM_STATUS GMM_STDCALL GmmLib::GmmGen10TextureCalc::FillTexPlanar(GMM_TEXTURE_IN
Height = YHeight + 2 * VHeight; // One VHeight for V and one for U.
FillTexPlanar_SetTilingBasedOnRequiredAlignment(
pTexInfo,
YHeight, true, // <-- YHeight alignment needed (so U is properly aligned).
VHeight, true); // <-- VHeight alignment needed (so V is properly aligned).
pTexInfo->OffsetInfo.Plane.NoOfPlanes = 3;
break;
}
@ -590,10 +590,7 @@ GMM_STATUS GMM_STDCALL GmmLib::GmmGen10TextureCalc::FillTexPlanar(GMM_TEXTURE_IN
Height = YHeight + 2 * VHeight;
FillTexPlanar_SetTilingBasedOnRequiredAlignment(
pTexInfo,
YHeight, true, // <-- YHeight alignment needed (so U is properly aligned).
VHeight, true); // <-- VHeight alignment needed (so V is properly aligned).
pTexInfo->OffsetInfo.Plane.NoOfPlanes = 3;
break;
}
@ -644,10 +641,7 @@ GMM_STATUS GMM_STDCALL GmmLib::GmmGen10TextureCalc::FillTexPlanar(GMM_TEXTURE_IN
Height = YHeight + 2 * VHeight;
FillTexPlanar_SetTilingBasedOnRequiredAlignment(
pTexInfo,
YHeight, true, // <-- YHeight alignment needed (so U is properly aligned).
YHeight, true); // <-- VHeight alignment needed (so V is properly aligned).
pTexInfo->OffsetInfo.Plane.NoOfPlanes = 3;
break;
}
@ -668,11 +662,6 @@ GMM_STATUS GMM_STDCALL GmmLib::GmmGen10TextureCalc::FillTexPlanar(GMM_TEXTURE_IN
Height = YHeight + VHeight;
FillTexPlanar_SetTilingBasedOnRequiredAlignment(
pTexInfo,
YHeight, true, // <-- YHeight alignment needed (so U/V are properly aligned, vertically).
0, false); // <-- VHeight alignment NOT needed (since U/V aren't on top of eachother).
// With SURFACE_STATE.XOffset support, the U-V interface has
// much lighter restrictions--which will be naturally met by
// surface pitch restrictions (i.e. dividing an IMC2/4 pitch
@ -681,7 +670,8 @@ GMM_STATUS GMM_STDCALL GmmLib::GmmGen10TextureCalc::FillTexPlanar(GMM_TEXTURE_IN
// Not technically UV packed but sizing works out the same
// if the resource is std swizzled
UVPacked = pTexInfo->Flags.Info.StdSwizzle ? true : false;
UVPacked = true;
pTexInfo->OffsetInfo.Plane.NoOfPlanes = 2;
break;
}
@ -722,11 +712,6 @@ GMM_STATUS GMM_STDCALL GmmLib::GmmGen10TextureCalc::FillTexPlanar(GMM_TEXTURE_IN
(pTexInfo->Format == GMM_FORMAT_P208))
{
WidthBytesPhysical = GFX_ALIGN(WidthBytesPhysical, 2); // If odd YWidth, pitch bumps-up to fit rounded-up U/V planes.
FillTexPlanar_SetTilingBasedOnRequiredAlignment(
pTexInfo,
YHeight, true, // <-- YHeight alignment needed (so UV is properly aligned).
0, false); // <-- VHeight alignment NOT needed (since U/V aren't on top of eachother).
}
else //if(pTexInfo->Format == GMM_FORMAT_NV11)
{
@ -738,8 +723,8 @@ GMM_STATUS GMM_STDCALL GmmLib::GmmGen10TextureCalc::FillTexPlanar(GMM_TEXTURE_IN
pTexInfo->Flags.Info.Linear = 1;
}
UVPacked = true;
UVPacked = true;
pTexInfo->OffsetInfo.Plane.NoOfPlanes = 2;
break;
}
case GMM_FORMAT_I420: // IYUV & I420: are identical to YV12 except,
@ -788,6 +773,7 @@ GMM_STATUS GMM_STDCALL GmmLib::GmmGen10TextureCalc::FillTexPlanar(GMM_TEXTURE_IN
pTexInfo->Flags.Info.TiledX = 0;
pTexInfo->Flags.Info.Linear = 1;
pTexInfo->OffsetInfo.Plane.NoOfPlanes = 1;
break;
}
default:
@ -802,8 +788,7 @@ GMM_STATUS GMM_STDCALL GmmLib::GmmGen10TextureCalc::FillTexPlanar(GMM_TEXTURE_IN
SetTileMode(pTexInfo);
// If the Surface has Odd height dimension, we will fall back to Linear Format.
// If MMC is enabled, disable MMC during such cases.
// MMC is not supported for linear formats.
if(pTexInfo->Flags.Gpu.MMC)
{
if(!(pTexInfo->Flags.Info.TiledY || pTexInfo->Flags.Info.TiledYf || pTexInfo->Flags.Info.TiledYs))
@ -849,24 +834,25 @@ GMM_STATUS GMM_STDCALL GmmLib::GmmGen10TextureCalc::FillTexPlanar(GMM_TEXTURE_IN
Height += AdjustedVHeight - VHeight;
}
// For std swizzled and UV packed tile Ys/Yf cases, the planes
// must be tile-boundary aligned. Actual alignment is handled
// in FillPlanarOffsetAddress, but height and width must
// be adjusted for correct size calculation
if((pTexInfo->Flags.Info.TiledYs || pTexInfo->Flags.Info.TiledYf) &&
(pTexInfo->Flags.Info.StdSwizzle || UVPacked))
// For Tiled Planar surfaces, the planes must be tile-boundary aligned.
// Actual alignment is handled in FillPlanarOffsetAddress, but height
// and width must be adjusted for correct size calculation
if(GMM_IS_TILED(pPlatform->TileInfo[pTexInfo->TileMode]))
{
uint32_t TileHeight = pGmmGlobalContext->GetPlatformInfo().TileInfo[pTexInfo->TileMode].LogicalTileHeight;
uint32_t TileWidth = pGmmGlobalContext->GetPlatformInfo().TileInfo[pTexInfo->TileMode].LogicalTileWidth;
pTexInfo->OffsetInfo.Plane.IsTileAlignedPlanes = true;
//for separate U and V planes, use U plane unaligned and V plane aligned
Height = GFX_ALIGN(YHeight, TileHeight) + (UVPacked ? GFX_ALIGN(AdjustedVHeight, TileHeight) :
(GFX_ALIGN(VHeight, TileHeight) + GFX_ALIGN(AdjustedVHeight, TileHeight)));
if(UVPacked)
if(pTexInfo->Format == GMM_FORMAT_IMC2 || // IMC2, IMC4 needs even tile columns
pTexInfo->Format == GMM_FORMAT_IMC4)
{
// If the UV planes are packed then the surface pitch must be
// padded out so that the tile-aligned UV data will fit.
// If the U & V planes are side-by-side then the surface pitch must be
// padded out so that the U and V planes will begin on a tile boundary.
// This means that an odd Y plane width must be padded out
// with an additional tile. Even widths do not need padding
uint32_t TileCols = GFX_CEIL_DIV(WidthBytesPhysical, TileWidth);
@ -876,9 +862,13 @@ GMM_STATUS GMM_STDCALL GmmLib::GmmGen10TextureCalc::FillTexPlanar(GMM_TEXTURE_IN
}
}
pTexInfo->Flags.Info.RedecribedPlanes = 1;
if(pTexInfo->Flags.Info.TiledYs || pTexInfo->Flags.Info.TiledYf)
{
pTexInfo->Flags.Info.RedecribedPlanes = true;
}
}
// Very wide planar tiled planar formats do not support MMC pre gen11. All formats do not support
//Special case LKF MMC compressed surfaces
if(pTexInfo->Flags.Gpu.MMC &&
pTexInfo->Flags.Gpu.UnifiedAuxSurface &&

File diff suppressed because it is too large

View File

@ -34,8 +34,10 @@ void GmmLib::GmmTextureCalc::FillPlanarOffsetAddress(GMM_TEXTURE_INFO *pTexInfo)
{
GMM_GFX_SIZE_T *pUOffsetX, *pUOffsetY;
GMM_GFX_SIZE_T *pVOffsetX, *pVOffsetY;
uint32_t YHeight = 0, VHeight = 0;
bool UVPacked = false;
uint32_t Height;
uint32_t WidthBytesPhysical = GFX_ULONG_CAST(pTexInfo->BaseWidth) * pTexInfo->BitsPerPixel >> 3;
#define SWAP_UV() \
{ \
@ -54,6 +56,8 @@ void GmmLib::GmmTextureCalc::FillPlanarOffsetAddress(GMM_TEXTURE_INFO *pTexInfo)
__GMM_ASSERTPTR(((pTexInfo->TileMode < GMM_TILE_MODES) && (pTexInfo->TileMode >= TILE_NONE)), VOIDRETURN);
GMM_DPF_ENTER;
const GMM_PLATFORM_INFO *pPlatform = GMM_OVERRIDE_PLATFORM_INFO(pTexInfo);
// GMM_PLANE_Y always at (0, 0)...
pTexInfo->OffsetInfo.Plane.X[GMM_PLANE_Y] = 0;
pTexInfo->OffsetInfo.Plane.Y[GMM_PLANE_Y] = 0;
@ -100,9 +104,11 @@ void GmmLib::GmmTextureCalc::FillPlanarOffsetAddress(GMM_TEXTURE_INFO *pTexInfo)
// VVVVVVVV
{
*pUOffsetX = 0;
YHeight = GFX_ALIGN(pTexInfo->BaseHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT);
*pUOffsetY = GFX_ALIGN(pTexInfo->BaseHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT);
*pVOffsetX = 0;
VHeight = GFX_ALIGN(GFX_CEIL_DIV(pTexInfo->BaseHeight, 2), GMM_IMCx_PLANE_ROW_ALIGNMENT);
*pVOffsetY =
GFX_ALIGN(pTexInfo->BaseHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT) +
GFX_ALIGN(GFX_CEIL_DIV(pTexInfo->BaseHeight, 2), GMM_IMCx_PLANE_ROW_ALIGNMENT);
@ -118,9 +124,11 @@ void GmmLib::GmmTextureCalc::FillPlanarOffsetAddress(GMM_TEXTURE_INFO *pTexInfo)
//VVVVVVVV
{
*pUOffsetX = 0;
YHeight = GFX_ALIGN(pTexInfo->BaseHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT);
*pUOffsetY = GFX_ALIGN(pTexInfo->BaseHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT);
*pVOffsetX = 0;
VHeight = GFX_ALIGN(GFX_CEIL_DIV(pTexInfo->BaseHeight, 4), GMM_IMCx_PLANE_ROW_ALIGNMENT);
*pVOffsetY =
GFX_ALIGN(pTexInfo->BaseHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT) +
GFX_ALIGN(GFX_CEIL_DIV(pTexInfo->BaseHeight, 4), GMM_IMCx_PLANE_ROW_ALIGNMENT);
@ -170,9 +178,11 @@ void GmmLib::GmmTextureCalc::FillPlanarOffsetAddress(GMM_TEXTURE_INFO *pTexInfo)
// VVVVVVVV
{
*pUOffsetX = 0;
YHeight = GFX_ALIGN(pTexInfo->BaseHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT);
*pUOffsetY = GFX_ALIGN(pTexInfo->BaseHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT);
*pVOffsetX = 0;
VHeight = GFX_ALIGN(pTexInfo->BaseHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT);
*pVOffsetY = GFX_ALIGN(pTexInfo->BaseHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT) * 2;
break;
@ -191,11 +201,16 @@ void GmmLib::GmmTextureCalc::FillPlanarOffsetAddress(GMM_TEXTURE_INFO *pTexInfo)
__GMM_ASSERT((pTexInfo->Pitch & 1) == 0);
*pUOffsetX = 0;
YHeight = GFX_ALIGN(pTexInfo->BaseHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT);
*pUOffsetY = GFX_ALIGN(pTexInfo->BaseHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT);
*pVOffsetX = pTexInfo->Pitch / 2;
VHeight = GFX_CEIL_DIV(YHeight, 2);
*pVOffsetY = GFX_ALIGN(pTexInfo->BaseHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT);
// Not technically UV packed but sizing works out the same
UVPacked = true;
break;
}
case GMM_FORMAT_I420: // I420 = IYUV
@ -242,6 +257,8 @@ void GmmLib::GmmTextureCalc::FillPlanarOffsetAddress(GMM_TEXTURE_INFO *pTexInfo)
*pUOffsetX = UOffset % pTexInfo->Pitch;
*pUOffsetY = UOffset / pTexInfo->Pitch;
YHeight = GFX_CEIL_DIV(YSize + 2 * VSize, WidthBytesPhysical);
break;
}
case GMM_FORMAT_NV12:
@ -258,7 +275,21 @@ void GmmLib::GmmTextureCalc::FillPlanarOffsetAddress(GMM_TEXTURE_INFO *pTexInfo)
// YYYYYYYY
// [UV-Packing]
*pUOffsetX = *pVOffsetX = 0;
*pUOffsetY = *pVOffsetY = Height;
YHeight = GFX_ALIGN(Height, __GMM_EVEN_ROW);
*pUOffsetY = *pVOffsetY = YHeight;
if((pTexInfo->Format == GMM_FORMAT_NV12) ||
(pTexInfo->Format == GMM_FORMAT_NV21) ||
(pTexInfo->Format == GMM_FORMAT_P010) ||
(pTexInfo->Format == GMM_FORMAT_P012) ||
(pTexInfo->Format == GMM_FORMAT_P016))
{
VHeight = GFX_CEIL_DIV(Height, 2);
}
else
{
VHeight = YHeight; // U/V plane is same as Y
}
UVPacked = true;
break;
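As a worked illustration of this case (numbers only; it assumes Height carries the 1080-row base height of a linear 1920x1080 NV12 surface):
// Illustration only:
//   YHeight  = GFX_ALIGN(1080, __GMM_EVEN_ROW) = 1080   // Y-plane rows
//   UOffsetY = VOffsetY = 1080                          // packed UV plane starts right after Y
//   VHeight  = GFX_CEIL_DIV(1080, 2) = 540              // 4:2:0 chroma rows
// giving 1080 + 540 = 1620 unaligned rows at the Y-plane pitch.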
@ -270,9 +301,19 @@ void GmmLib::GmmTextureCalc::FillPlanarOffsetAddress(GMM_TEXTURE_INFO *pTexInfo)
}
}
if(((pTexInfo->Flags.Info.TiledYs || pTexInfo->Flags.Info.TiledYf) &&
(pTexInfo->Flags.Info.StdSwizzle || UVPacked)) ||
pTexInfo->Flags.Gpu.__NonMsaaTileYCcs)
pTexInfo->OffsetInfo.Plane.UnAligned.Height[GMM_PLANE_Y] = YHeight;
if(pTexInfo->OffsetInfo.Plane.NoOfPlanes == 2)
{
pTexInfo->OffsetInfo.Plane.UnAligned.Height[GMM_PLANE_U] = VHeight;
}
else if(pTexInfo->OffsetInfo.Plane.NoOfPlanes == 3)
{
pTexInfo->OffsetInfo.Plane.UnAligned.Height[GMM_PLANE_U] =
pTexInfo->OffsetInfo.Plane.UnAligned.Height[GMM_PLANE_V] = VHeight;
}
if(GMM_IS_TILED(pPlatform->TileInfo[pTexInfo->TileMode]) || pTexInfo->Flags.Gpu.__NonMsaaTileYCcs)
{
GMM_GFX_SIZE_T TileHeight = pGmmGlobalContext->GetPlatformInfo().TileInfo[pTexInfo->TileMode].LogicalTileHeight;
GMM_GFX_SIZE_T TileWidth = pGmmGlobalContext->GetPlatformInfo().TileInfo[pTexInfo->TileMode].LogicalTileWidth;
@ -280,7 +321,9 @@ void GmmLib::GmmTextureCalc::FillPlanarOffsetAddress(GMM_TEXTURE_INFO *pTexInfo)
*pUOffsetX = GFX_ALIGN(*pUOffsetX, TileWidth);
*pUOffsetY = GFX_ALIGN(*pUOffsetY, TileHeight);
*pVOffsetX = GFX_ALIGN(*pVOffsetX, TileWidth);
*pVOffsetY = GFX_ALIGN(*pVOffsetY, TileHeight);
*pVOffsetY = UVPacked ?
GFX_ALIGN(*pVOffsetY, TileHeight) :
GFX_ALIGN(YHeight, TileHeight) + GFX_ALIGN(VHeight, TileHeight);
if(pTexInfo->Flags.Gpu.UnifiedAuxSurface && pTexInfo->Flags.Gpu.__NonMsaaTileYCcs)
{

View File

@ -253,7 +253,9 @@ GMM_INLINE GMM_STATUS __GmmTexFillHAlignVAlign(GMM_TEXTURE_INFO *pTexInfo)
UnitAlignHeight = pPlatform->TexAlign.XAdapter.Height;
UnitAlignWidth = pPlatform->TexAlign.XAdapter.Width;
}
else if(((pTexInfo->Flags.Gpu.CCS && GFX_GET_CURRENT_RENDERCORE(pPlatform->Platform) >= IGFX_GEN9_CORE)) &&
else if(((pTexInfo->Flags.Gpu.MCS &&
GFX_GET_CURRENT_RENDERCORE(pPlatform->Platform) >= IGFX_GEN12_CORE) ||
(pTexInfo->Flags.Gpu.CCS && GFX_GET_CURRENT_RENDERCORE(pPlatform->Platform) >= IGFX_GEN9_CORE)) &&
(pTexInfo->MSAA.NumSamples > 1))
{
UnitAlignWidth = 16;

View File

@ -980,11 +980,7 @@ GMM_STATUS GMM_STDCALL GmmLib::GmmTextureCalc::FillTexPlanar(GMM_TEXTURE_INFO *
__GMM_ASSERTPTR(pTexInfo, GMM_ERROR);
__GMM_ASSERTPTR(pRestrictions, GMM_ERROR);
__GMM_ASSERT(!pTexInfo->Flags.Info.TiledW);
// Client should always give us linear-fallback option for planar surfaces,
// except for MMC surfaces, which are TileY.
//__GMM_ASSERT(pTexInfo->Flags.Info.Linear || pTexInfo->Flags.Gpu.MMC);
pTexInfo->Flags.Info.Linear = 1;
pTexInfo->TileMode = TILE_NONE;
pTexInfo->TileMode = TILE_NONE;
const GMM_PLATFORM_INFO *pPlatform = GMM_OVERRIDE_PLATFORM_INFO(pTexInfo);
@ -1022,10 +1018,7 @@ GMM_STATUS GMM_STDCALL GmmLib::GmmTextureCalc::FillTexPlanar(GMM_TEXTURE_INFO *
Height = YHeight + 2 * VHeight; // One VHeight for V and one for U.
FillTexPlanar_SetTilingBasedOnRequiredAlignment(
pTexInfo,
YHeight, true, // <-- YHeight alignment needed (so U is properly aligned).
VHeight, true); // <-- VHeight alignment needed (so V is properly aligned).
pTexInfo->OffsetInfo.Plane.NoOfPlanes = 3;
break;
}
@ -1043,10 +1036,7 @@ GMM_STATUS GMM_STDCALL GmmLib::GmmTextureCalc::FillTexPlanar(GMM_TEXTURE_INFO *
Height = YHeight + 2 * VHeight;
FillTexPlanar_SetTilingBasedOnRequiredAlignment(
pTexInfo,
YHeight, true, // <-- YHeight alignment needed (so U is properly aligned).
VHeight, true); // <-- VHeight alignment needed (so V is properly aligned).
pTexInfo->OffsetInfo.Plane.NoOfPlanes = 3;
break;
}
@ -1101,10 +1091,7 @@ GMM_STATUS GMM_STDCALL GmmLib::GmmTextureCalc::FillTexPlanar(GMM_TEXTURE_INFO *
Height = YHeight + 2 * VHeight;
FillTexPlanar_SetTilingBasedOnRequiredAlignment(
pTexInfo,
YHeight, true, // <-- YHeight alignment needed (so U is properly aligned).
YHeight, true); // <-- VHeight alignment needed (so V is properly aligned).
pTexInfo->OffsetInfo.Plane.NoOfPlanes = 3;
break;
}
@ -1125,11 +1112,6 @@ GMM_STATUS GMM_STDCALL GmmLib::GmmTextureCalc::FillTexPlanar(GMM_TEXTURE_INFO *
Height = YHeight + VHeight;
FillTexPlanar_SetTilingBasedOnRequiredAlignment(
pTexInfo,
YHeight, true, // <-- YHeight alignment needed (so U/V are properly aligned, vertically).
0, false); // <-- VHeight alignment NOT needed (since U/V aren't on top of eachother).
// With SURFACE_STATE.XOffset support, the U-V interface has
// much lighter restrictions--which will be naturally met by
// surface pitch restrictions (i.e. dividing an IMC2/4 pitch
@ -1138,7 +1120,8 @@ GMM_STATUS GMM_STDCALL GmmLib::GmmTextureCalc::FillTexPlanar(GMM_TEXTURE_INFO *
// Not technically UV packed but sizing works out the same
// if the resource is std swizzled
UVPacked = pTexInfo->Flags.Info.StdSwizzle ? true : false;
UVPacked = true;
pTexInfo->OffsetInfo.Plane.NoOfPlanes = 2;
break;
}
@ -1155,7 +1138,7 @@ GMM_STATUS GMM_STDCALL GmmLib::GmmTextureCalc::FillTexPlanar(GMM_TEXTURE_INFO *
// YYYYYYYY
// YYYYYYYY
// [UV-Packing]
YHeight = GFX_ALIGN(pTexInfo->BaseHeight, __GMM_EVEN_ROW);
if((pTexInfo->Format == GMM_FORMAT_NV12) ||
(pTexInfo->Format == GMM_FORMAT_NV21) ||
(pTexInfo->Format == GMM_FORMAT_P010) ||
@ -1179,11 +1162,6 @@ GMM_STATUS GMM_STDCALL GmmLib::GmmTextureCalc::FillTexPlanar(GMM_TEXTURE_INFO *
(pTexInfo->Format == GMM_FORMAT_P208))
{
WidthBytesPhysical = GFX_ALIGN(WidthBytesPhysical, 2); // If odd YWidth, pitch bumps-up to fit rounded-up U/V planes.
FillTexPlanar_SetTilingBasedOnRequiredAlignment(
pTexInfo,
YHeight, true, // <-- YHeight alignment needed (so UV is properly aligned).
0, false); // <-- VHeight alignment NOT needed (since U/V aren't on top of eachother).
}
else //if(pTexInfo->Format == GMM_FORMAT_NV11)
{
@ -1195,8 +1173,8 @@ GMM_STATUS GMM_STDCALL GmmLib::GmmTextureCalc::FillTexPlanar(GMM_TEXTURE_INFO *
pTexInfo->Flags.Info.Linear = 1;
}
UVPacked = true;
UVPacked = true;
pTexInfo->OffsetInfo.Plane.NoOfPlanes = 2;
break;
}
case GMM_FORMAT_I420: // IYUV & I420: are identical to YV12 except,
@ -1239,12 +1217,12 @@ GMM_STATUS GMM_STDCALL GmmLib::GmmTextureCalc::FillTexPlanar(GMM_TEXTURE_INFO *
Height = GFX_CEIL_DIV(YSize + UVSize, WidthBytesPhysical);
// Tiling not supported, since YPitch != UVPitch...
pTexInfo->Flags.Info.TiledY = 0;
pTexInfo->Flags.Info.TiledYf = 0;
pTexInfo->Flags.Info.TiledYs = 0;
pTexInfo->Flags.Info.TiledX = 0;
pTexInfo->Flags.Info.Linear = 1;
pTexInfo->Flags.Info.TiledY = 0;
pTexInfo->Flags.Info.TiledYf = 0;
pTexInfo->Flags.Info.TiledYs = 0;
pTexInfo->Flags.Info.TiledX = 0;
pTexInfo->Flags.Info.Linear = 1;
pTexInfo->OffsetInfo.Plane.NoOfPlanes = 1;
break;
}
default:
@ -1259,8 +1237,7 @@ GMM_STATUS GMM_STDCALL GmmLib::GmmTextureCalc::FillTexPlanar(GMM_TEXTURE_INFO *
SetTileMode(pTexInfo);
// If the Surface has Odd height dimension, we will fall back to Linear Format.
// If MMC is enabled, disable MMC during such cases.
// MMC is not supported for linear formats.
if(pTexInfo->Flags.Gpu.MMC)
{
if(!(pTexInfo->Flags.Info.TiledY || pTexInfo->Flags.Info.TiledYf || pTexInfo->Flags.Info.TiledYs))
@ -1298,19 +1275,20 @@ GMM_STATUS GMM_STDCALL GmmLib::GmmTextureCalc::FillTexPlanar(GMM_TEXTURE_INFO *
pRestrictions->RenderPitchAlignment <<= LShift;
}
// For std swizzled and UV packed tile Ys/Yf cases, the planes
// must be tile-boundary aligned. Actual alignment is handled
// in FillPlanarOffsetAddress, but height and width must
// be adjusted for correct size calculation
if((pTexInfo->Flags.Info.TiledYs || pTexInfo->Flags.Info.TiledYf) &&
(pTexInfo->Flags.Info.StdSwizzle || UVPacked))
// For Tiled Planar surfaces, the planes must be tile-boundary aligned.
// Actual alignment is handled in FillPlanarOffsetAddress, but height
// and width must be adjusted for correct size calculation
if(GMM_IS_TILED(pPlatform->TileInfo[pTexInfo->TileMode]))
{
uint32_t TileHeight = pGmmGlobalContext->GetPlatformInfo().TileInfo[pTexInfo->TileMode].LogicalTileHeight;
uint32_t TileWidth = pGmmGlobalContext->GetPlatformInfo().TileInfo[pTexInfo->TileMode].LogicalTileWidth;
pTexInfo->OffsetInfo.Plane.IsTileAlignedPlanes = true;
Height = GFX_ALIGN(YHeight, TileHeight) + (GFX_ALIGN(VHeight, TileHeight) * (UVPacked ? 1 : 2));
if(UVPacked)
if(pTexInfo->Format == GMM_FORMAT_IMC2 || // IMC2, IMC4 needs even tile columns
pTexInfo->Format == GMM_FORMAT_IMC4)
{
// If the UV planes are packed then the surface pitch must be
// padded out so that the tile-aligned UV data will fit.
@ -1323,7 +1301,10 @@ GMM_STATUS GMM_STDCALL GmmLib::GmmTextureCalc::FillTexPlanar(GMM_TEXTURE_INFO *
}
}
pTexInfo->Flags.Info.RedecribedPlanes = 1;
if(pTexInfo->Flags.Info.TiledYs || pTexInfo->Flags.Info.TiledYf)
{
pTexInfo->Flags.Info.RedecribedPlanes = true;
}
}
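A worked example of the tile-aligned height computed in the block above (illustration only; legacy TileY has a logical tile height of 32 rows):
// Illustration only: NV12 1920x1080 on legacy TileY, UVPacked = true:
//   Height = GFX_ALIGN(YHeight=1080, 32) + GFX_ALIGN(VHeight=540, 32) * 1
//          = 1088 + 544 = 1632 rows
// so both the Y plane and the packed UV plane start on a tile-row boundary.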
//Special case LKF MMC compressed surfaces
@ -1610,4 +1591,4 @@ void GMM_STDCALL GmmLib::GmmTextureCalc::AllocateOneTileThanRequied(GMM_TEXTURE_
WidthBytesRender += pPlatform->TileInfo[pTexInfo->TileMode].LogicalTileWidth;
WidthBytesPhysical = WidthBytesLock = WidthBytesRender;
}
}
}

View File

@ -1,131 +1,141 @@
# Copyright(c) 2017 Intel Corporation
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files(the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and / or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
set (EXE_NAME GMMULT)
set(GMMULT_HEADERS
GmmCachePolicyULT.h
GmmCommonULT.h
GmmGen10CachePolicyULT.h
GmmGen10ResourceULT.h
GmmGen11CachePolicyULT.h
GmmGen11ResourceULT.h
GmmGen9CachePolicyULT.h
GmmGen9ResourceULT.h
GmmResourceULT.h
stdafx.h
targetver.h
)
set(GMMULT_SOURCES
GmmCachePolicyULT.cpp
GmmCommonULT.cpp
GmmGen10CachePolicyULT.cpp
GmmGen10ResourceULT.cpp
GmmGen11CachePolicyULT.cpp
GmmGen11ResourceULT.cpp
GmmGen9CachePolicyULT.cpp
GmmGen9ResourceULT.cpp
GmmResourceCpuBltULT.cpp
GmmResourceULT.cpp
googletest/src/gtest-all.cc
GmmULT.cpp
)
source_group("Source Files\\Cache Policy" FILES
GmmCachePolicyULT.cpp
GmmGen9CachePolicyULT.cpp
GmmGen10CachePolicyULT.cpp
GmmGen11CachePolicyULT.cpp
)
source_group("Source Files\\Resource" FILES
GmmGen10ResourceULT.cpp
GmmGen9ResourceULT.cpp
GmmResourceCpuBltULT.cpp
GmmResourceULT.cpp
)
source_group("Header Files\\Cache Policy" FILES
GmmCachePolicyULT.h
GmmGen10CachePolicyULT.h
GmmGen11CachePolicyULT.h
GmmGen9CachePolicyULT.h
)
source_group("Header Files\\Resource" FILES
GmmGen10ResourceULT.h
GmmGen9ResourceULT.h
GmmResourceULT.h
)
source_group("gtest" FILES
googletest/gtest/gtest.h
googletest/src/gtest-all.cc
)
include_directories(BEFORE ./)
include_directories(BEFORE ${PROJECT_SOURCE_DIR})
include_directories(
googletest
googletest/gtest
${BS_DIR_INC}/umKmInc
${BS_DIR_INC}
${BS_DIR_GMMLIB}/inc
${BS_DIR_INC}/common
)
macro(GmmLibULTSetTargetConfig ultTarget)
if (TARGET ${ultTarget})
set_property(TARGET ${ultTarget} APPEND PROPERTY COMPILE_DEFINITIONS
$<$<CONFIG:Release>: _RELEASE>
$<$<CONFIG:ReleaseInternal>: _RELEASE_INTERNAL>
$<$<CONFIG:Debug>: _DEBUG>
)
endif()
endmacro()
add_executable(${EXE_NAME} ${GMMULT_HEADERS} ${GMMULT_SOURCES})
GmmLibULTSetTargetConfig(${EXE_NAME})
set_property(TARGET ${EXE_NAME} APPEND PROPERTY COMPILE_DEFINITIONS __GMM GMM_LIB_DLL __UMD)
if(NOT TARGET igfx_gmmumd_dll)
add_subdirectory("${BS_DIR_GMMLIB}" "${CMAKE_BINARY_DIR}/gmmlib/ult")
endif()
target_link_libraries(${EXE_NAME} igfx_gmmumd_dll)
target_link_libraries(${EXE_NAME}
pthread
dl
)
add_custom_target(Run_ULT ALL DEPENDS GMMULT)
add_custom_command(
TARGET Run_ULT
POST_BUILD
COMMAND echo running ULTs
COMMAND "${CMAKE_COMMAND}" -E env "LD_LIBRARY_PATH=$<TARGET_FILE_DIR:igfx_gmmumd_dll>" ${CMAKE_CFG_INTDIR}/${EXE_NAME} --gtest_filter=CTest*
# Copyright(c) 2017 Intel Corporation
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files(the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and / or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
set (EXE_NAME GMMULT)
set(GMMULT_HEADERS
GmmCachePolicyULT.h
GmmCommonULT.h
GmmGen10CachePolicyULT.h
GmmGen10ResourceULT.h
GmmGen11CachePolicyULT.h
GmmGen11ResourceULT.h
GmmGen12ResourceULT.h
GmmGen12CachePolicyULT.h
GmmGen9CachePolicyULT.h
GmmGen9ResourceULT.h
GmmResourceULT.h
stdafx.h
targetver.h
)
set(GMMULT_SOURCES
GmmCachePolicyULT.cpp
GmmCommonULT.cpp
GmmGen10CachePolicyULT.cpp
GmmGen10ResourceULT.cpp
GmmGen11CachePolicyULT.cpp
GmmGen12CachePolicyULT.cpp
GmmGen11ResourceULT.cpp
GmmGen12ResourceULT.cpp
GmmGen9CachePolicyULT.cpp
GmmGen9ResourceULT.cpp
GmmResourceCpuBltULT.cpp
GmmResourceULT.cpp
googletest/src/gtest-all.cc
GmmULT.cpp
)
source_group("Source Files\\Cache Policy" FILES
GmmCachePolicyULT.cpp
GmmGen9CachePolicyULT.cpp
GmmGen10CachePolicyULT.cpp
GmmGen11CachePolicyULT.cpp
GmmGen12CachePolicyULT.cpp
)
source_group("Source Files\\Resource" FILES
GmmGen10ResourceULT.cpp
GmmGen11ResourceULT.cpp
GmmGen12ResourceULT.cpp
GmmGen9ResourceULT.cpp
GmmResourceCpuBltULT.cpp
GmmResourceULT.cpp
)
source_group("Header Files\\Cache Policy" FILES
GmmCachePolicyULT.h
GmmGen10CachePolicyULT.h
GmmGen11CachePolicyULT.h
GmmGen12CachePolicyULT.h
GmmGen9CachePolicyULT.h
)
source_group("Header Files\\Resource" FILES
GmmGen12ResourceULT.h
GmmGen11ResourceULT.h
GmmGen10ResourceULT.h
GmmGen9ResourceULT.h
GmmResourceULT.h
)
source_group("gtest" FILES
googletest/gtest/gtest.h
googletest/src/gtest-all.cc
)
include_directories(BEFORE ./)
include_directories(BEFORE ${PROJECT_SOURCE_DIR})
include_directories(
googletest
googletest/gtest
${BS_DIR_INC}/umKmInc
${BS_DIR_INC}
${BS_DIR_GMMLIB}/inc
${BS_DIR_INC}/common
)
macro(GmmLibULTSetTargetConfig ultTarget)
if (TARGET ${ultTarget})
set_property(TARGET ${ultTarget} APPEND PROPERTY COMPILE_DEFINITIONS
$<$<CONFIG:Release>: _RELEASE>
$<$<CONFIG:ReleaseInternal>: _RELEASE_INTERNAL>
$<$<CONFIG:Debug>: _DEBUG>
)
endif()
endmacro()
add_executable(${EXE_NAME} ${GMMULT_HEADERS} ${GMMULT_SOURCES})
GmmLibULTSetTargetConfig(${EXE_NAME})
set_property(TARGET ${EXE_NAME} APPEND PROPERTY COMPILE_DEFINITIONS __GMM GMM_LIB_DLL __UMD)
if(NOT TARGET igfx_gmmumd_dll)
add_subdirectory("${BS_DIR_GMMLIB}" "${CMAKE_BINARY_DIR}/gmmlib/ult")
endif()
target_link_libraries(${EXE_NAME} igfx_gmmumd_dll)
target_link_libraries(${EXE_NAME}
pthread
dl
)
add_custom_target(Run_ULT ALL DEPENDS GMMULT)
add_custom_command(
TARGET Run_ULT
POST_BUILD
COMMAND echo running ULTs
COMMAND "${CMAKE_COMMAND}" -E env "LD_LIBRARY_PATH=$<TARGET_FILE_DIR:igfx_gmmumd_dll>" ${CMAKE_CFG_INTDIR}/${EXE_NAME} --gtest_filter=CTest*
)

View File

@ -0,0 +1,238 @@
/*==============================================================================
Copyright(c) 2019 Intel Corporation
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files(the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and / or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
============================================================================*/
#include "GmmGen12CachePolicyULT.h"
using namespace std;
/////////////////////////////////////////////////////////////////////////////////////
/// Sets up the common environment for Cache Policy fixture tests. This is called once per
/// test case, before executing any of the tests under the cache policy fixture test case.
/// It also calls SetupTestCase from CommonULT to initialize global context and others.
///
/////////////////////////////////////////////////////////////////////////////////////
void CTestGen12CachePolicy::SetUpTestCase()
{
GfxPlatform.eProductFamily = IGFX_TIGERLAKE_LP;
GfxPlatform.eRenderCoreFamily = IGFX_GEN12_CORE;
AllocateAdapterInfo();
pGfxAdapterInfo->SystemInfo.L3CacheSizeInKb = 3072;
const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrEDram = false;
const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLLCBypass = 1;
CommonULT::SetUpTestCase();
printf("%s\n", __FUNCTION__);
}
/////////////////////////////////////////////////////////////////////////////////////
/// Cleans up once all the tests finish execution. It also calls TearDownTestCase
/// from CommonULT to destroy global context and others.
///
/////////////////////////////////////////////////////////////////////////////////////
void CTestGen12CachePolicy::TearDownTestCase()
{
printf("%s\n", __FUNCTION__);
CommonULT::TearDownTestCase();
}
void CTestGen12CachePolicy::CheckL3CachePolicy()
{
const uint32_t L3_WB_CACHEABLE = 0x3;
const uint32_t L3_UNCACHEABLE = 0x1;
// Check Usage MOCS index against MOCS settings
for(uint32_t Usage = GMM_RESOURCE_USAGE_UNKNOWN; Usage < GMM_RESOURCE_USAGE_MAX; Usage++)
{
GMM_CACHE_POLICY_ELEMENT ClientRequest = pGmmULTClientContext->GetCachePolicyElement((GMM_RESOURCE_USAGE_TYPE)Usage);
uint32_t AssignedMocsIdx = ClientRequest.MemoryObjectOverride.Gen12.Index;
GMM_CACHE_POLICY_TBL_ELEMENT Mocs = pGmmULTClientContext->GetCachePolicyTlbElement(AssignedMocsIdx);
//printf("Usage: %d --> Index: [%d]\n", Usage, AssignedMocsIdx);
EXPECT_EQ(0, Mocs.L3.ESC) << "Usage# " << Usage << ": ESC is non-zero";
EXPECT_EQ(0, Mocs.L3.SCC) << "Usage# " << Usage << ": SCC is non-zero";
EXPECT_EQ(0, Mocs.L3.Reserved) << "Usage# " << Usage << ": Reserved field is non-zero";
// Check that the MOCS index does not exceed GMM_MAX_NUMBER_MOCS_INDEXES
EXPECT_GT(GMM_MAX_NUMBER_MOCS_INDEXES, AssignedMocsIdx) << "Usage# " << Usage << ": MOCS Index greater than MAX allowed (63)";
if(ClientRequest.L3Eviction == 0x2) //63
{
if((GMM_RESOURCE_USAGE_TYPE)Usage == GMM_RESOURCE_USAGE_L3_EVICTION)
{
EXPECT_EQ(AssignedMocsIdx, 63) << "Usage# " << Usage << ": Incorrect Index for L3Eviction type# " << ClientRequest.L3Eviction;
EXPECT_EQ(0, ClientRequest.L3) << "Usage# " << Usage << ": Incorrect L3 cacheability for L3Eviction type# " << ClientRequest.L3Eviction;
}
else
{
EXPECT_NE(AssignedMocsIdx, 63) << "Usage# " << Usage << ": Incorrect Index for L3Eviction type# " << ClientRequest.L3Eviction;
EXPECT_EQ(1, ClientRequest.L3) << "Usage# " << Usage << ": Incorrect L3 cacheability for L3Eviction type# " << ClientRequest.L3Eviction;
}
}
else if(ClientRequest.L3Eviction == 0x3) //61
{
EXPECT_EQ(AssignedMocsIdx, 61) << "Usage# " << Usage << ": Incorrect Index for L3Eviction type# " << ClientRequest.L3Eviction;
EXPECT_EQ(1, ClientRequest.L3) << "Usage# " << Usage << ": Incorrect L3 cacheability for L3Eviction type# " << ClientRequest.L3Eviction;
}
else if(Usage == GMM_RESOURCE_USAGE_CCS) //60
{
EXPECT_EQ(AssignedMocsIdx, 60) << "Usage# " << Usage << ": Incorrect Index for CCS";
EXPECT_EQ(0, ClientRequest.L3) << "Usage# " << Usage << ": Incorrect L3 cacheability for CCS";
}
else if(Usage == GMM_RESOURCE_USAGE_MOCS_62) //62
{
EXPECT_EQ(AssignedMocsIdx, 62) << "Usage# " << Usage << ": Incorrect Index for MOCS_62";
EXPECT_EQ(0, ClientRequest.L3) << "Usage# " << Usage << ": Incorrect L3 cacheability for MOCS#62";
}
// Check whether the assigned index is appropriate for the HDCL1 setting
else if(ClientRequest.HDCL1)
{
EXPECT_GE(AssignedMocsIdx, GMM_GEN10_HDCL1_MOCS_INDEX_START) << "Usage# " << Usage << ": Incorrect Index for HDCL1 setting";
}
else
{
EXPECT_LT(AssignedMocsIdx, GMM_GEN10_HDCL1_MOCS_INDEX_START) << "Usage# " << Usage << ": Incorrect Index for HDCL1 setting";
}
if(ClientRequest.L3)
{
EXPECT_EQ(L3_WB_CACHEABLE, Mocs.L3.Cacheability) << "Usage# " << Usage << ": Incorrect L3 cacheability setting";
}
else
{
EXPECT_EQ(L3_UNCACHEABLE, Mocs.L3.Cacheability) << "Usage# " << Usage << ": Incorrect L3 cacheability setting";
}
}
}
TEST_F(CTestGen12CachePolicy, TestL3CachePolicy)
{
CheckL3CachePolicy();
}
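For reference, a minimal sketch (illustrative, not part of this change) of the client-side lookup that the checks above validate; the usage value and variable names are placeholders:
GMM_CACHE_POLICY_ELEMENT Elem = pGmmULTClientContext->GetCachePolicyElement(GMM_RESOURCE_USAGE_RENDER_TARGET);
uint32_t MocsIdx = Elem.MemoryObjectOverride.Gen12.Index; // index programmed into the MOCS field; expected < GMM_MAX_NUMBER_MOCS_INDEXES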
void CTestGen12CachePolicy::CheckLlcEdramCachePolicy()
{
const uint32_t TargetCache_LLC = 1;
const uint32_t LeCC_UNCACHEABLE = 0x0;
const uint32_t LeCC_WC_UNCACHEABLE = 0x1;
const uint32_t LeCC_WB_CACHEABLE = 0x3;
const uint32_t LeCC_WT_CACHEABLE = 0x2;
// Check Usage MOCS index against MOCS settings
for(uint32_t Usage = GMM_RESOURCE_USAGE_UNKNOWN; Usage < GMM_RESOURCE_USAGE_MAX; Usage++)
{
GMM_CACHE_POLICY_ELEMENT ClientRequest = pGmmULTClientContext->GetCachePolicyElement((GMM_RESOURCE_USAGE_TYPE)Usage);
uint32_t AssignedMocsIdx = ClientRequest.MemoryObjectOverride.Gen12.Index;
GMM_CACHE_POLICY_TBL_ELEMENT Mocs = pGmmULTClientContext->GetCachePolicyTlbElement(AssignedMocsIdx);
// Check for unused fields
EXPECT_EQ(0, Mocs.LeCC.AOM) << "Usage# " << Usage << ": AOM is non-zero";
EXPECT_EQ(0, Mocs.LeCC.CoS) << "Usage# " << Usage << ": CoS is non-zero";
EXPECT_EQ(0, Mocs.LeCC.PFM) << "Usage# " << Usage << ": PFM is non-zero";
EXPECT_EQ(0, Mocs.LeCC.SCC) << "Usage# " << Usage << ": SCC is non-zero";
// SCF field might be set for LKF/Gen12+ platforms;
EXPECT_EQ(0, Mocs.LeCC.SCF & !const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLLCBypass) << "Usage# " << Usage << ": SCF is non-zero";
EXPECT_EQ(0, Mocs.LeCC.ESC) << "Usage# " << Usage << ": ESC is non-zero";
EXPECT_EQ(0, Mocs.LeCC.Reserved) << "Usage# " << Usage << ": Reserved field is non-zero";
// Check for age
EXPECT_EQ(ClientRequest.AGE, Mocs.LeCC.LRUM) << "Usage# " << Usage << ": Incorrect AGE settings";
// Check for Snoop Setting
EXPECT_EQ(ClientRequest.SSO, Mocs.LeCC.SelfSnoop) << "Usage# " << Usage << ": Self Snoop is non-zero";
// Check that the MOCS index does not exceed GMM_MAX_NUMBER_MOCS_INDEXES
EXPECT_GT(GMM_MAX_NUMBER_MOCS_INDEXES, AssignedMocsIdx) << "Usage# " << Usage << ": MOCS Index greater than MAX allowed (63)";
if(ClientRequest.L3Eviction == 0x2) //63
{
GMM_CACHE_POLICY_ELEMENT MOCS63 = pGmmULTClientContext->GetCachePolicyElement(GMM_RESOURCE_USAGE_L3_EVICTION);
if((GMM_RESOURCE_USAGE_TYPE)Usage == GMM_RESOURCE_USAGE_L3_EVICTION)
{
EXPECT_EQ(AssignedMocsIdx, 63) << "Usage# " << Usage << ": Incorrect Index for L3Eviction type# " << ClientRequest.L3Eviction;
}
else
{
MOCS63.L3 = 1; //Override L3 for this check, since HW forces it to L3-uncached
EXPECT_NE(AssignedMocsIdx, 63) << "Usage# " << Usage << ": Incorrect Index for L3Eviction type# " << ClientRequest.L3Eviction;
EXPECT_EQ(MOCS63.Value, ClientRequest.Value) << "Usage# " << Usage << ": Incorrect usage for L3Eviction type# " << ClientRequest.L3Eviction;
}
}
else if(ClientRequest.L3Eviction == 0x3) //61
{
GMM_CACHE_POLICY_ELEMENT MOCS61 = pGmmULTClientContext->GetCachePolicyElement(GMM_RESOURCE_USAGE_L3_EVICTION_SPECIAL);
EXPECT_EQ(AssignedMocsIdx, 61) << "Usage# " << Usage << ": Incorrect Index for L3Eviction type# " << ClientRequest.L3Eviction;
EXPECT_EQ(0, ClientRequest.LLC) << "Usage# " << Usage << ": Incorrect LLC cacheability for L3Eviction type# " << ClientRequest.L3Eviction;
EXPECT_EQ(MOCS61.Value, ClientRequest.Value) << "Usage# " << Usage << ": Incorrect usage for L3Eviction type# " << ClientRequest.L3Eviction;
}
else if(Usage == GMM_RESOURCE_USAGE_CCS) //60
{
EXPECT_EQ(AssignedMocsIdx, 60) << "Usage# " << Usage << ": Incorrect Index for CCS";
}
else if(Usage == GMM_RESOURCE_USAGE_MOCS_62) //62
{
EXPECT_EQ(AssignedMocsIdx, 62) << "Usage# " << Usage << ": Incorrect Index for MOCS_62";
}
// Check whether the assigned index is appropriate for the HDCL1 setting
else if(ClientRequest.HDCL1)
{
EXPECT_GE(AssignedMocsIdx, GMM_GEN10_HDCL1_MOCS_INDEX_START) << "Usage# " << Usage << ": Incorrect Index for HDCL1 setting";
}
else
{
EXPECT_LT(AssignedMocsIdx, GMM_GEN10_HDCL1_MOCS_INDEX_START) << "Usage# " << Usage << ": Incorrect Index for HDCL1 setting";
}
if(!ClientRequest.LLC && !ClientRequest.ELLC) // Uncached
{
EXPECT_EQ(LeCC_WC_UNCACHEABLE, Mocs.LeCC.Cacheability) << "Usage# " << Usage << ": Incorrect LLC/eDRAM cacheability setting";
}
else
{
if(ClientRequest.LLC) // LLC only
{
EXPECT_EQ(TargetCache_LLC, Mocs.LeCC.TargetCache) << "Usage# " << Usage << ": Incorrect target cache setting";
EXPECT_EQ(LeCC_WB_CACHEABLE, Mocs.LeCC.Cacheability) << "Usage# " << Usage << ": Incorrect LLC cacheability setting";
}
else
{
EXPECT_EQ(TargetCache_LLC, Mocs.LeCC.TargetCache) << "Usage# " << Usage << ": Incorrect target cache setting";
EXPECT_EQ(LeCC_WC_UNCACHEABLE, Mocs.LeCC.Cacheability) << "Usage# " << Usage << ": Incorrect LLC cacheability setting";
}
}
}
}
TEST_F(CTestGen12CachePolicy, TestLlcEdramCachePolicy)
{
CheckLlcEdramCachePolicy();
}

View File

@ -0,0 +1,37 @@
/*==============================================================================
Copyright(c) 2019 Intel Corporation
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files(the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and / or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
============================================================================*/
#pragma once
#include "GmmCachePolicyULT.h"
class CTestGen12CachePolicy : public CTestCachePolicy
{
protected:
virtual void CheckL3CachePolicy();
virtual void CheckLlcEdramCachePolicy();
public:
static void SetUpTestCase();
static void TearDownTestCase();
};

File diff suppressed because it is too large

View File

@ -0,0 +1,40 @@
/*==============================================================================
Copyright(c) 2019 Intel Corporation
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files(the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and / or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
============================================================================*/
#pragma once
#include "GmmGen10ResourceULT.h"
#include "../GmmLib/inc/Internal/Common/Platform/GmmGen12Platform.h"
class CTestGen12Resource : public CTestGen10Resource
{
public:
static void SetUpTestCase();
static void TearDownTestCase();
};
#define DEFINE_TILE(xxx, bpp) \
(bpp == TEST_BPP_8) ? TILE_##xxx##_8bpe : \
(bpp == TEST_BPP_16) ? TILE_##xxx##_16bpe : \
(bpp == TEST_BPP_32) ? TILE_##xxx##_32bpe : \
(bpp == TEST_BPP_64) ? TILE_##xxx##_64bpe : \
TILE_##xxx##_128bpe
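A brief illustration (not from the diff) of how the token pasting resolves; the tile name used here is hypothetical:
// DEFINE_TILE(64_2D, TEST_BPP_16) expands to TILE_64_2D_16bpe;
// any bpp other than 8/16/32/64 falls through to the _128bpe variant.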

View File

@ -2832,7 +2832,8 @@ TEST_F(CTestResource, TestPlanar2D_RGBP)
// VVVVVVVV
// VVVVVVVV
// VVVVVVVV
const TEST_TILE_TYPE TileTypes[] = {TEST_LINEAR, TEST_TILEX, TEST_TILEY};
const TEST_TILE_TYPE TileTypes[] = {TEST_LINEAR, TEST_TILEX, TEST_TILEY};
const uint32_t PlaneRowAlignment = 16;
const uint32_t TileSize[3][2] = {{1, 1}, //Linear
{512, 8}, // TileX
@ -2845,19 +2846,29 @@ TEST_F(CTestResource, TestPlanar2D_RGBP)
gmmParams.Type = RESOURCE_2D;
gmmParams.NoGfxMemory = 1;
gmmParams.Flags.Gpu.Texture = 1;
gmmParams.BaseWidth64 = 0x100;
gmmParams.BaseHeight = 0x100;
gmmParams.BaseWidth64 = 0x101;
gmmParams.BaseHeight = 0x101;
gmmParams.Depth = 0x1;
SetTileFlag(gmmParams, static_cast<TEST_TILE_TYPE>(Tile));
gmmParams.Flags.Info.Linear = 1; // GmmLib needs linear to be set as fallback for all planar surfaces
gmmParams.Format = GMM_FORMAT_RGBP;
gmmParams.Format = GMM_FORMAT_RGBP;
GMM_RESOURCE_INFO *ResourceInfo;
ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
uint32_t Pitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64, TileSize[TileIndex][0]);
uint32_t Height = GMM_ULT_ALIGN(gmmParams.BaseHeight * 3 /*Y, U, V*/, TileSize[TileIndex][1]);
uint32_t Size = GMM_ULT_ALIGN(Pitch * Height, GMM_KBYTE(4));
uint32_t Pitch, Height;
if(Tile != TEST_LINEAR)
{
Pitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64, TileSize[TileIndex][0]);
//Since Tile alignment factor is greater than GMM_IMCx_PLANE_ROW_ALIGNMENT=16
Height = GMM_ULT_ALIGN(gmmParams.BaseHeight, PlaneRowAlignment);
Height = GMM_ULT_ALIGN(Height, TileSize[TileIndex][1]) * 3 /*Y, U, V*/;
}
else
{
Pitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64, GMM_BYTES(64));
Height = GMM_ULT_ALIGN(gmmParams.BaseHeight, PlaneRowAlignment /* min16 rows*/) * 3 /*Y, U, V*/;
}
uint32_t Size = GMM_ULT_ALIGN(Pitch * Height, GMM_KBYTE(4));
VerifyResourcePitch<true>(ResourceInfo, Pitch);
if(Tile != TEST_LINEAR)
@ -2875,11 +2886,11 @@ TEST_F(CTestResource, TestPlanar2D_RGBP)
// U plane should be at end of Y plane
EXPECT_EQ(0, ResourceInfo->GetPlanarXOffset(GMM_PLANE_U));
EXPECT_EQ(gmmParams.BaseHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_U));
EXPECT_EQ(Height / 3, ResourceInfo->GetPlanarYOffset(GMM_PLANE_U));
// V plane should be at end of U plane
EXPECT_EQ(0, ResourceInfo->GetPlanarXOffset(GMM_PLANE_V));
EXPECT_EQ(gmmParams.BaseHeight * 2, ResourceInfo->GetPlanarYOffset(GMM_PLANE_V));
EXPECT_EQ(2 * (Height / 3), ResourceInfo->GetPlanarYOffset(GMM_PLANE_V));
pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
}
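As a worked example (numbers only, not additional test code) of the tiled-plane math above, take the TileY case with TileSize = {128, 32} and BaseHeight = 0x101; the helper names are illustrative:
uint32_t PlaneH = GMM_ULT_ALIGN(0x101, PlaneRowAlignment); // 257 -> 272: pad each plane to 16 rows
PlaneH = GMM_ULT_ALIGN(PlaneH, 32);                        // 272 -> 288: then pad to the TileY height
uint32_t TotalH = PlaneH * 3;                              // 864 rows for Y + U + V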
@ -2897,7 +2908,8 @@ TEST_F(CTestResource, TestPlanar2D_MFX_JPEG_YUV422V)
// UUUUUUUU
// VVVVVVVV
// VVVVVVVV
const TEST_TILE_TYPE TileTypes[] = {TEST_LINEAR, TEST_TILEX, TEST_TILEY};
const TEST_TILE_TYPE TileTypes[] = {TEST_LINEAR, TEST_TILEX, TEST_TILEY};
const uint32_t PlaneRowAlignment = 16;
const uint32_t TileSize[3][2] = {{1, 1}, //Linear
{512, 8}, // TileX
@ -2910,8 +2922,8 @@ TEST_F(CTestResource, TestPlanar2D_MFX_JPEG_YUV422V)
gmmParams.Type = RESOURCE_2D;
gmmParams.NoGfxMemory = 1;
gmmParams.Flags.Gpu.Texture = 1;
gmmParams.BaseWidth64 = 0x100;
gmmParams.BaseHeight = 0x100;
gmmParams.BaseWidth64 = 0x101;
gmmParams.BaseHeight = 0x101;
gmmParams.Depth = 0x1;
SetTileFlag(gmmParams, static_cast<TEST_TILE_TYPE>(Tile));
gmmParams.Flags.Info.Linear = 1; // GmmLib needs linear to be set as fallback for all planar surfaces
@ -2920,9 +2932,25 @@ TEST_F(CTestResource, TestPlanar2D_MFX_JPEG_YUV422V)
GMM_RESOURCE_INFO *ResourceInfo;
ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
uint32_t Pitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64, TileSize[TileIndex][0]);
uint32_t Height = GMM_ULT_ALIGN(gmmParams.BaseHeight /*Y */ + gmmParams.BaseHeight /*U, V*/, TileSize[TileIndex][1]);
uint32_t Size = GMM_ULT_ALIGN(Pitch * Height, GMM_KBYTE(4));
uint32_t Pitch, Height;
uint32_t YHeight, VHeight;
if(Tile != TEST_LINEAR)
{
Pitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64, TileSize[TileIndex][0]);
YHeight = GMM_ULT_ALIGN(gmmParams.BaseHeight, PlaneRowAlignment);
YHeight = GMM_ULT_ALIGN(YHeight, TileSize[TileIndex][1]);
VHeight = GMM_ULT_ALIGN(GMM_ULT_ALIGN(gmmParams.BaseHeight, 2) / 2, PlaneRowAlignment);
VHeight = GMM_ULT_ALIGN(VHeight, TileSize[TileIndex][1]);
}
else
{
Pitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64, GMM_BYTES(64));
YHeight = GMM_ULT_ALIGN(gmmParams.BaseHeight, PlaneRowAlignment);
VHeight = GMM_ULT_ALIGN(GMM_ULT_ALIGN(gmmParams.BaseHeight, 2) / 2, PlaneRowAlignment);
}
Height = YHeight + 2 * VHeight;
uint32_t Size = GMM_ULT_ALIGN(Pitch * Height, GMM_KBYTE(4));
VerifyResourcePitch<true>(ResourceInfo, Pitch);
if(Tile != TEST_LINEAR)
@ -2940,11 +2968,11 @@ TEST_F(CTestResource, TestPlanar2D_MFX_JPEG_YUV422V)
// U plane should be at end of Y plane
EXPECT_EQ(0, ResourceInfo->GetPlanarXOffset(GMM_PLANE_U));
EXPECT_EQ(gmmParams.BaseHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_U));
EXPECT_EQ(YHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_U));
// V plane should be at end of U plane
EXPECT_EQ(0, ResourceInfo->GetPlanarXOffset(GMM_PLANE_V));
EXPECT_EQ(gmmParams.BaseHeight + gmmParams.BaseHeight / 2, ResourceInfo->GetPlanarYOffset(GMM_PLANE_V));
EXPECT_EQ(YHeight + VHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_V));
pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
}
@ -2960,7 +2988,8 @@ TEST_F(CTestResource, TestPlanar2D_MFX_JPEG_YUV411R)
//YYYYYYYY
//UUUUUUUU
//VVVVVVVV
const TEST_TILE_TYPE TileTypes[] = {TEST_LINEAR, TEST_TILEX, TEST_TILEY};
const TEST_TILE_TYPE TileTypes[] = {TEST_LINEAR, TEST_TILEX, TEST_TILEY};
const uint32_t PlaneRowAlignment = 16;
const uint32_t TileSize[3][2] = {{1, 1}, //Linear
{512, 8}, // TileX
@ -2973,19 +3002,35 @@ TEST_F(CTestResource, TestPlanar2D_MFX_JPEG_YUV411R)
gmmParams.Type = RESOURCE_2D;
gmmParams.NoGfxMemory = 1;
gmmParams.Flags.Gpu.Texture = 1;
gmmParams.BaseWidth64 = 0x100;
gmmParams.BaseHeight = 0x100;
gmmParams.BaseWidth64 = 0x101;
gmmParams.BaseHeight = 0x101;
gmmParams.Depth = 0x1;
SetTileFlag(gmmParams, static_cast<TEST_TILE_TYPE>(Tile));
gmmParams.Flags.Info.Linear = 1; // GmmLib needs linear to be set as fallback for all planar surfaces
gmmParams.Format = GMM_FORMAT_MFX_JPEG_YUV411R_TYPE;
gmmParams.Format = GMM_FORMAT_MFX_JPEG_YUV411R_TYPE;
GMM_RESOURCE_INFO *ResourceInfo;
ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
uint32_t Pitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64, TileSize[TileIndex][0]);
uint32_t Height = GMM_ULT_ALIGN(gmmParams.BaseHeight /*Y */ + gmmParams.BaseHeight / 2 /*U, V*/, TileSize[TileIndex][1]);
uint32_t Size = GMM_ULT_ALIGN(Pitch * Height, GMM_KBYTE(4));
uint32_t Pitch, Height;
uint32_t YHeight, VHeight;
if(Tile != TEST_LINEAR)
{
Pitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64, TileSize[TileIndex][0]);
YHeight = GMM_ULT_ALIGN(gmmParams.BaseHeight, PlaneRowAlignment);
YHeight = GMM_ULT_ALIGN(YHeight, TileSize[TileIndex][1]);
VHeight = GMM_ULT_ALIGN(GMM_ULT_ALIGN(gmmParams.BaseHeight, 4) / 4, PlaneRowAlignment);
VHeight = GMM_ULT_ALIGN(VHeight, TileSize[TileIndex][1]);
}
else
{
Pitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64, GMM_BYTES(64));
YHeight = GMM_ULT_ALIGN(gmmParams.BaseHeight, PlaneRowAlignment);
VHeight = GMM_ULT_ALIGN(GMM_ULT_ALIGN(gmmParams.BaseHeight, 4) / 4, PlaneRowAlignment);
}
Height = YHeight + 2 * VHeight;
uint32_t Size = GMM_ULT_ALIGN(Pitch * Height, GMM_KBYTE(4));
VerifyResourcePitch<true>(ResourceInfo, Pitch);
if(Tile != TEST_LINEAR)
@ -3003,11 +3048,11 @@ TEST_F(CTestResource, TestPlanar2D_MFX_JPEG_YUV411R)
// U plane should be at end of Y plane
EXPECT_EQ(0, ResourceInfo->GetPlanarXOffset(GMM_PLANE_U));
EXPECT_EQ(gmmParams.BaseHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_U));
EXPECT_EQ(YHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_U));
// V plane should be at end of U plane
EXPECT_EQ(0, ResourceInfo->GetPlanarXOffset(GMM_PLANE_V));
EXPECT_EQ(gmmParams.BaseHeight + gmmParams.BaseHeight / 4, ResourceInfo->GetPlanarYOffset(GMM_PLANE_V));
EXPECT_EQ(YHeight + VHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_V));
pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
}
@ -3040,15 +3085,26 @@ TEST_F(CTestResource, TestPlanar2D_NV12)
gmmParams.BaseHeight = 0x100;
gmmParams.Depth = 0x1;
SetTileFlag(gmmParams, static_cast<TEST_TILE_TYPE>(Tile));
gmmParams.Flags.Info.Linear = 1; // GmmLib needs linear to be set as fallback for all tiled planar surfaces
gmmParams.Format = GMM_FORMAT_NV12;
gmmParams.Format = GMM_FORMAT_NV12;
GMM_RESOURCE_INFO *ResourceInfo;
ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
uint32_t Pitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64, TileSize[TileIndex][0]);
uint32_t Height = GMM_ULT_ALIGN(gmmParams.BaseHeight /*Y*/ + gmmParams.BaseHeight / 2 /*UV*/, TileSize[TileIndex][1]);
uint32_t Size = GMM_ULT_ALIGN(Pitch * Height, GMM_KBYTE(4));
uint32_t Pitch, Height;
if(Tile != TEST_LINEAR)
{
Pitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64, TileSize[TileIndex][0]);
Height = GMM_ULT_ALIGN(gmmParams.BaseHeight, TileSize[TileIndex][1]) +
GMM_ULT_ALIGN(gmmParams.BaseHeight / 2, TileSize[TileIndex][1]);
}
else
{
Pitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64, TileSize[TileIndex][0]);
Height = GMM_ULT_ALIGN(gmmParams.BaseHeight /*Y*/ + gmmParams.BaseHeight / 2 /*UV*/, TileSize[TileIndex][1]);
}
uint32_t Size = GMM_ULT_ALIGN(Pitch * Height, GMM_KBYTE(4));
VerifyResourcePitch<true>(ResourceInfo, Pitch);
if(Tile != TEST_LINEAR)
@ -3066,9 +3122,18 @@ TEST_F(CTestResource, TestPlanar2D_NV12)
// U/V plane should be at end of Y plane
EXPECT_EQ(0, ResourceInfo->GetPlanarXOffset(GMM_PLANE_U));
EXPECT_EQ(gmmParams.BaseHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_U));
EXPECT_EQ(0, ResourceInfo->GetPlanarXOffset(GMM_PLANE_V));
EXPECT_EQ(gmmParams.BaseHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_V));
if(Tile != TEST_LINEAR)
{
EXPECT_EQ(GMM_ULT_ALIGN(gmmParams.BaseHeight, TileSize[TileIndex][1]), ResourceInfo->GetPlanarYOffset(GMM_PLANE_U));
EXPECT_EQ(GMM_ULT_ALIGN(gmmParams.BaseHeight, TileSize[TileIndex][1]), ResourceInfo->GetPlanarYOffset(GMM_PLANE_V));
}
else
{
EXPECT_EQ(gmmParams.BaseHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_U));
EXPECT_EQ(gmmParams.BaseHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_V));
}
pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
}
@ -3084,7 +3149,8 @@ TEST_F(CTestResource, TestPlanar2D_IMC4)
// YYYYYYYY
// UUUUVVVV
// UUUUVVVV
const TEST_TILE_TYPE TileTypes[] = {TEST_LINEAR, TEST_TILEX, TEST_TILEY};
const TEST_TILE_TYPE TileTypes[] = {TEST_LINEAR, TEST_TILEX, TEST_TILEY};
const uint32_t PlaneRowAlignment = 16;
const uint32_t TileSize[3][2] = {{1, 1}, //Linear
{512, 8}, // TileX
@ -3097,19 +3163,41 @@ TEST_F(CTestResource, TestPlanar2D_IMC4)
gmmParams.Type = RESOURCE_2D;
gmmParams.NoGfxMemory = 1;
gmmParams.Flags.Gpu.Texture = 1;
gmmParams.BaseWidth64 = 0x100;
gmmParams.BaseHeight = 0x100;
gmmParams.BaseWidth64 = 0x101;
gmmParams.BaseHeight = 0x101;
gmmParams.Depth = 0x1;
SetTileFlag(gmmParams, static_cast<TEST_TILE_TYPE>(Tile));
gmmParams.Flags.Info.Linear = 1; // GmmLib needs linear to be set as fallback for all planar surfaces
gmmParams.Format = GMM_FORMAT_IMC4;
gmmParams.Format = GMM_FORMAT_IMC4;
GMM_RESOURCE_INFO *ResourceInfo;
ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
uint32_t Pitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64, TileSize[TileIndex][0]);
uint32_t Height = GMM_ULT_ALIGN(gmmParams.BaseHeight /*Y*/ + gmmParams.BaseHeight / 2 /*UV*/, TileSize[TileIndex][1]);
uint32_t Size = GMM_ULT_ALIGN(Pitch * Height, GMM_KBYTE(4));
uint32_t Pitch, Height;
uint32_t YHeight, VHeight;
if(Tile != TEST_LINEAR)
{
Pitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64, TileSize[TileIndex][0]);
if(Pitch / TileSize[TileIndex][0] % 2)
{
Pitch += TileSize[TileIndex][0];
}
YHeight = GMM_ULT_ALIGN(gmmParams.BaseHeight, PlaneRowAlignment);
VHeight = YHeight / 2;
YHeight = GMM_ULT_ALIGN(YHeight, TileSize[TileIndex][1]);
VHeight = GMM_ULT_ALIGN(VHeight, TileSize[TileIndex][1]); // No need for PlaneRowAlignment since this is the last plane
}
else
{
Pitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64, GMM_BYTES(64));
YHeight = GMM_ULT_ALIGN(gmmParams.BaseHeight, PlaneRowAlignment);
VHeight = YHeight / 2;
}
Height = YHeight + VHeight;
uint32_t Size = GMM_ULT_ALIGN(Pitch * Height, GMM_KBYTE(4));
VerifyResourcePitch<true>(ResourceInfo, Pitch);
if(Tile != TEST_LINEAR)
@ -3127,13 +3215,18 @@ TEST_F(CTestResource, TestPlanar2D_IMC4)
// U plane should be at end of Y plane
EXPECT_EQ(0, ResourceInfo->GetPlanarXOffset(GMM_PLANE_U));
EXPECT_EQ(gmmParams.BaseHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_U));
// V plane should be at end of U plane
EXPECT_EQ(Pitch / 2, ResourceInfo->GetPlanarXOffset(GMM_PLANE_V));
EXPECT_EQ(gmmParams.BaseHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_V));
pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
if(Tile != TEST_LINEAR)
{
EXPECT_EQ(YHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_U));
EXPECT_EQ(YHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_V));
}
else
{
EXPECT_EQ(YHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_U));
EXPECT_EQ(YHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_V));
}
}
}
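Concretely (an illustration, not part of the test), the even-tile-count pitch adjustment above works out as follows for TileY (128-byte-wide tiles) and BaseWidth64 = 0x101:
uint32_t P = GMM_ULT_ALIGN(0x101, 128); // 257 -> 384 bytes, i.e. 3 tiles across
if((P / 128) % 2)                       // odd tile count: the V half (at Pitch/2) would not start on a tile boundary
{
    P += 128;                           // -> 512 bytes, 4 tiles across
}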

View File

@ -24,11 +24,14 @@ OTHER DEALINGS IN THE SOFTWARE.
#include "Internal/Common/Platform/GmmGen10Platform.h"
#include "Internal/Common/Platform/GmmGen11Platform.h"
#include "Internal/Common/Platform/GmmGen12Platform.h"
#include "External/Common/CachePolicy/GmmCachePolicyGen10.h"
#include "External/Common/CachePolicy/GmmCachePolicyGen11.h"
#include "External/Common/CachePolicy/GmmCachePolicyGen12.h"
#include "Internal/Common/Texture/GmmTextureCalc.h"
#include "Internal/Common/Texture/GmmGen10TextureCalc.h"
#include "Internal/Common/Texture/GmmGen11TextureCalc.h"
#include "Internal/Common/Texture/GmmGen12TextureCalc.h"
/////////////////////////////////////////////////////////////////////////////////////
/// Static function to return a PlatformInfo object based on input platform
@ -60,7 +63,11 @@ GmmLib::PlatformInfo *GmmLib::PlatformInfo::Create(PLATFORM Platform, bool Overr
}
#endif
GMM_DPF_EXIT;
if(GFX_GET_CURRENT_RENDERCORE(Platform) >= IGFX_GEN11_CORE)
if(GFX_GET_CURRENT_RENDERCORE(Platform) >= IGFX_GEN12_CORE)
{
return new GmmLib::PlatformInfoGen12(Platform);
}
else if(GFX_GET_CURRENT_RENDERCORE(Platform) >= IGFX_GEN11_CORE)
{
return new GmmLib::PlatformInfoGen11(Platform);
}
@ -95,7 +102,11 @@ GmmLib::GmmCachePolicyCommon *GmmLib::GmmCachePolicyCommon::Create()
return pGmmGlobalContext->GetCachePolicyObj();
}
if(GFX_GET_CURRENT_RENDERCORE(pGmmGlobalContext->GetPlatformInfo().Platform) >= IGFX_GEN11_CORE)
if(GFX_GET_CURRENT_RENDERCORE(pGmmGlobalContext->GetPlatformInfo().Platform) >= IGFX_GEN12_CORE)
{
pGmmCachePolicy = new GmmLib::GmmGen12CachePolicy(CachePolicy);
}
else if(GFX_GET_CURRENT_RENDERCORE(pGmmGlobalContext->GetPlatformInfo().Platform) >= IGFX_GEN11_CORE)
{
pGmmCachePolicy = new GmmLib::GmmGen11CachePolicy(CachePolicy);
}
@ -158,8 +169,9 @@ GmmLib::GmmTextureCalc *GmmLib::GmmTextureCalc::Create(PLATFORM Platform, uint8_
case IGFX_GEN11_CORE:
return new GmmGen11TextureCalc();
break;
case IGFX_GEN12_CORE:
default:
return new GmmGen11TextureCalc();
return new GmmGen12TextureCalc();
break;
}
}

View File

@ -177,6 +177,7 @@ bool GMM_STDCALL GmmIsYUVFormatLCUAligned(GMM_RESOURCE_FORMAT Format)
case GMM_FORMAT_P010:
case GMM_FORMAT_P016:
case GMM_FORMAT_YUY2:
case GMM_FORMAT_Y210:
case GMM_FORMAT_Y410:
case GMM_FORMAT_Y216:
case GMM_FORMAT_Y416:

View File

@ -0,0 +1,68 @@
/*==============================================================================
Copyright(c) 2019 Intel Corporation
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files(the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and / or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
============================================================================*/
#pragma once
#ifdef __cplusplus
#include "../GmmCachePolicyCommon.h"
#define GMM_GEN12_MAX_NUMBER_MOCS_INDEXES (60) // On TGL the last four indexes (#60-#63) are reserved by h/w; a few of them are s/w-configurable though (e.g. #60)
namespace GmmLib
{
class NON_PAGED_SECTION GmmGen12CachePolicy :
public GmmGen11CachePolicy
{
public:
/* Constructors */
GmmGen12CachePolicy(GMM_CACHE_POLICY_ELEMENT *pCachePolicy) : GmmGen11CachePolicy(pCachePolicy)
{
#if(defined(__GMM_KMD__))
{
// Set the WA's needed for Private PAT initialization
SetPATInitWA();
SetupPAT();
}
#endif
}
virtual ~GmmGen12CachePolicy()
{
}
virtual uint32_t GetMaxSpecialMocsIndex()
{
return CurrentMaxSpecialMocsIndex;
}
int32_t IsSpecialMOCSUsage(GMM_RESOURCE_USAGE_TYPE Usage, bool& UpdateMOCS);
/* Function prototypes */
GMM_STATUS InitCachePolicy();
uint8_t SelectNewPATIdx(GMM_GFX_MEMORY_TYPE WantedMT, GMM_GFX_MEMORY_TYPE MT1, GMM_GFX_MEMORY_TYPE MT2);
uint32_t BestMatchingPATIdx(GMM_CACHE_POLICY_ELEMENT CachePolicy);
GMM_STATUS SetPATInitWA();
GMM_STATUS SetupPAT();
void SetUpMOCSTable();
};
}
#endif // #ifdef __cplusplus

View File

@ -173,6 +173,12 @@ typedef union GMM_PRIVATE_PAT_REC {
}Gen10;
#endif
struct
{
uint32_t MemoryType : 2;
uint32_t Reserved : 30;
}Gen12;
uint32_t Value;
} GMM_PRIVATE_PAT;
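A minimal sketch (assumed usage, not taken from this change) of filling the new Gen12 view of the PAT union; the memory-type value is a placeholder:
GMM_PRIVATE_PAT PatEntry = {0};
PatEntry.Gen12.MemoryType = 0x3;  // 2-bit memory-type encoding (illustrative value)
uint32_t RawPat = PatEntry.Value; // full 32-bit entry read back through the union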

View File

@ -110,7 +110,7 @@ typedef union MEMORY_OBJECT_CONTROL_STATE_REC
uint32_t EncryptedData : 1;
uint32_t Index : 6 ;
uint32_t : 25;
}Gen9, Gen10, Gen11;
}Gen9, Gen10, Gen11, Gen12;
uint32_t DwordValue;
}MEMORY_OBJECT_CONTROL_STATE;

View File

@ -112,6 +112,9 @@ namespace GmmLib
GMM_VIRTUAL uint8_t GMM_STDCALL IsCompressed(GMM_RESOURCE_FORMAT Format);
GMM_VIRTUAL uint8_t GMM_STDCALL IsYUVPacked(GMM_RESOURCE_FORMAT Format);
GMM_VIRTUAL GMM_SURFACESTATE_FORMAT GMM_STDCALL GetSurfaceStateFormat(GMM_RESOURCE_FORMAT Format);
GMM_VIRTUAL uint8_t GMM_STDCALL GetSurfaceStateCompressionFormat(GMM_RESOURCE_FORMAT Format);
GMM_VIRTUAL uint8_t GMM_STDCALL GetMediaSurfaceStateCompressionFormat(GMM_RESOURCE_FORMAT Format);
GMM_VIRTUAL GMM_E2ECOMP_FORMAT GMM_STDCALL GetLosslessCompressionType(GMM_RESOURCE_FORMAT Format);
GMM_VIRTUAL uint64_t GMM_STDCALL GetInternalGpuVaRangeLimit();
/* ResourceInfo Creation and Destroy API's */

View File

@ -196,6 +196,20 @@ typedef enum GMM_TILE_TYPE_ENUM
GMM_NOT_TILED
}GMM_TILE_TYPE;
//===========================================================================
// typedef:
// GMM_E2E_COMPRESSION_TYPE_ENUM
//
// Description:
// This enum describes the compression type (i.e. render-compressed, media-compressed, or uncompressed)
//---------------------------------------------------------------------------
typedef enum GMM_E2E_COMPRESSION_TYPE_ENUM
{
GMM_UNCOMPRESSED,
GMM_RENDER_COMPRESSED,
GMM_MEDIA_COMPRESSED
}GMM_E2E_COMPRESSION_TYPE;
//===========================================================================
// typedef:
// GMM_CPU_CACHE_TYPE_ENUM
@ -416,6 +430,70 @@ typedef enum GMM_SURFACESTATE_FORMAT_ENUM
#include "GmmFormatTable.h"
} GMM_SURFACESTATE_FORMAT;
typedef enum GMM_E2ECOMP_FORMAT_ENUM
{
GMM_E2ECOMP_FORMAT_INVALID = 0,
GMM_E2ECOMP_FORMAT_RGB64, //1h - Reserved
GMM_E2ECOMP_FORMAT_RGB32, //2h - Reserved
GMM_E2ECOMP_MIN_FORMAT = GMM_E2ECOMP_FORMAT_RGB32,
GMM_E2ECOMP_FORMAT_YUY2, //3h
GMM_E2ECOMP_FORMAT_YCRCB_SWAPUV = GMM_E2ECOMP_FORMAT_YUY2,
GMM_E2ECOMP_FORMAT_YCRCB_SWAPUVY = GMM_E2ECOMP_FORMAT_YUY2,
GMM_E2ECOMP_FORMAT_YCRCB_SWAPY = GMM_E2ECOMP_FORMAT_YUY2,
GMM_E2ECOMP_FORMAT_Y410, //4h
GMM_E2ECOMP_FORMAT_Y210, //5h
GMM_E2ECOMP_FORMAT_Y216 = GMM_E2ECOMP_FORMAT_Y210,
GMM_E2ECOMP_FORMAT_Y416, //6h
GMM_E2ECOMP_FORMAT_P010, //7h
GMM_E2ECOMP_FORMAT_P016, //8h
GMM_E2ECOMP_FORMAT_AYUV, //9h
GMM_E2ECOMP_FORMAT_ARGB8b, //Ah
GMM_E2ECOMP_FORMAT_RGB5A1 = GMM_E2ECOMP_FORMAT_ARGB8b,
GMM_E2ECOMP_FORMAT_RGBA4 = GMM_E2ECOMP_FORMAT_ARGB8b,
GMM_E2ECOMP_FORMAT_B5G6R5 = GMM_E2ECOMP_FORMAT_ARGB8b,
GMM_E2ECOMP_FORMAT_SWAPY, //Bh
GMM_E2ECOMP_FORMAT_SWAPUV, //Ch
GMM_E2ECOMP_FORMAT_SWAPUVY, //Dh
GMM_E2ECOMP_FORMAT_RGB10b, //Eh --Which media format is it?
GMM_E2ECOMP_FORMAT_NV12, //Fh
GMM_E2ECOMP_FORMAT_RGBAFLOAT16, //0x10h
GMM_E2ECOMP_FORMAT_R32G32B32A32_FLOAT, //0x11h
GMM_E2ECOMP_FORMAT_R32G32B32A32_SINT, //0x12h
GMM_E2ECOMP_FORMAT_R32G32B32A32_UINT, //0x13h
GMM_E2ECOMP_FORMAT_R16G16B16A16_UNORM, //0x14h
GMM_E2ECOMP_FORMAT_R16G16B16A16_SNORM, //0x15h
GMM_E2ECOMP_FORMAT_R16G16B16A16_SINT, //0x16h
GMM_E2ECOMP_FORMAT_R16G16B16A16_UINT, //0x17h
GMM_E2ECOMP_FORMAT_R10G10B10A2_UNORM, //0x18h
GMM_E2ECOMP_FORMAT_RGB10A2 = GMM_E2ECOMP_FORMAT_R10G10B10A2_UNORM,
GMM_E2ECOMP_FORMAT_R10G10B10FLOAT_A2_UNORM, //0x19h
GMM_E2ECOMP_FORMAT_R10G10B10A2_UINT, //0x1Ah
GMM_E2ECOMP_FORMAT_R8G8B8A8_SNORM, //0x1Bh
GMM_E2ECOMP_FORMAT_R8G8B8A8_SINT, //0x1Ch
GMM_E2ECOMP_FORMAT_R8G8B8A8_UINT, //0x1Dh
GMM_E2ECOMP_FORMAT_R11G11B10_FLOAT, //0x1Eh
GMM_E2ECOMP_FORMAT_RG11B10 = GMM_E2ECOMP_FORMAT_R11G11B10_FLOAT,
GMM_E2ECOMP_MAX_FORMAT = GMM_E2ECOMP_FORMAT_R11G11B10_FLOAT, //should always be equal to last format encoding
GMM_E2ECOMP_FORMAT_RGBA = GMM_E2ECOMP_FORMAT_INVALID,
GMM_E2ECOMP_FORMAT_R = GMM_E2ECOMP_FORMAT_INVALID,
GMM_E2ECOMP_FORMAT_RG = GMM_E2ECOMP_FORMAT_INVALID,
} GMM_E2ECOMP_FORMAT;
//===========================================================================
// typedef:
// GMM_TILE_WALK

View File

@ -46,418 +46,428 @@ OTHER DEALINGS IN THE SOFTWARE.
#define VLV2 GFX_IS_PRODUCT(Data.Platform,IGFX_VALLEYVIEW)
#define WA GMM_FORMAT_WA
#define x 0
#define NC 0
#define NC GMM_COMPR_FORMAT_INVALID
#define FC(ver, bpc, fmtstr, bpcstr, typestr) \
(ver == 1 || SKU(FtrE2ECompression)) ? \
((bpc == 16) ? GMM_E2ECOMP_FORMAT_RGBAFLOAT16 : \
(bpc == 32) ? GMM_E2ECOMP_FORMAT_R32G32B32A32_FLOAT : \
(bpc == 8) ? GMM_E2ECOMP_FORMAT_ARGB8b : \
(bpc == x) ? GMM_E2ECOMP_FORMAT_##fmtstr : NC) :NC
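To illustrate (commentary only, with x defined as 0 above), the FC() entries used in the table below resolve as:
// FC(3, 8, RGBA, 8, U)   -> GMM_E2ECOMP_FORMAT_ARGB8b   (bpc == 8 branch)
// FC(3, x, RGB10A2, , )  -> GMM_E2ECOMP_FORMAT_RGB10A2  (bpc == x branch, token-pasted)
// Either collapses to NC (GMM_COMPR_FORMAT_INVALID) when ver != 1 and FtrE2ECompression is not set.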
/****************************************************************************\
GMM FORMAT TABLE
(See bottom of file for more info.)
Supported (ALWAYS / *) -----------------------------------------------------------o
Reserved ---------------------------------------------------------------o |
RCS SURFACE_STATE.Format (or NA) --------------------------------o | |
ASTC Format (A / x) ----------------------------------------o | | |
Render Target Eligibility (R / x / *) -------------------o | | | |
Element Depth (Pixels) -------------------------------o | | | | |
Element Height (Pixels) ---------------------------o | | | | | |
Element Width (Pixels) ------------------------o | | | | | | |
Bits-per-Element -------------------------o | | | | | | | |
| | | | | | | | |
Name bpe w h d R A RCS.SS RESV Available
--------------------------------------------------------------------------------------*/
Supported (ALWAYS / *) -----------------------------------------------------------------o
SURFACE_STATE.CompressionFormat (or NC) --------------------------------------o |
RCS SURFACE_STATE.Format (or NA) --------------------------------o | |
ASTC Format (A / x) ----------------------------------------o | | |
Render Target Eligibility (R / x / *) -------------------o | | | |
Element Depth (Pixels) -------------------------------o | | | | |
Element Height (Pixels) ---------------------------o | | | | | |
Element Width (Pixels) ------------------------o | | | | | | |
Bits-per-Element -------------------------o | | | | | | | |
| | | | | | | | |
Name bpe w h d R A RCS.SS CompressFormat Available
------------------------------------------------------------------------------------------*/
#ifdef INCLUDE_SURFACESTATE_FORMATS
GMM_FORMAT( A1B5G5R5_UNORM , 16, 1, 1, 1, R, x, 0x124, 0xA , GEN(8) || VLV2 )
GMM_FORMAT( A4B4G4R4_UNORM , 16, 1, 1, 1, R, x, 0x125, 0xA , GEN(8) )
GMM_FORMAT( A4P4_UNORM_PALETTE0 , 8, 1, 1, 1, R, x, 0x148, NC , ALWAYS )
GMM_FORMAT( A4P4_UNORM_PALETTE1 , 8, 1, 1, 1, R, x, 0x14F, NC , ALWAYS )
GMM_FORMAT( A8_UNORM , 8, 1, 1, 1, R, x, 0x144, 0xA , GEN(7) )
GMM_FORMAT( A8P8_UNORM_PALETTE0 , 16, 1, 1, 1, R, x, 0x10F, NC , ALWAYS )
GMM_FORMAT( A8P8_UNORM_PALETTE1 , 16, 1, 1, 1, R, x, 0x110, NC , ALWAYS )
GMM_FORMAT( A8X8_UNORM_G8R8_SNORM , 32, 1, 1, 1, R, x, 0x0E7, NC , ALWAYS )
GMM_FORMAT( A16_FLOAT , 16, 1, 1, 1, R, x, 0x117, NC , GEN(7) )
GMM_FORMAT( A16_UNORM , 16, 1, 1, 1, R, x, 0x113, NC , GEN(7) )
GMM_FORMAT( A24X8_UNORM , 32, 1, 1, 1, R, x, 0x0E2, NC , GEN(7) )
GMM_FORMAT( A32_FLOAT , 32, 1, 1, 1, R, x, 0x0E5, NC , GEN(7) )
GMM_FORMAT( A32_UNORM , 32, 1, 1, 1, R, x, 0x0DE, NC , GEN(7) )
GMM_FORMAT( A32X32_FLOAT , 64, 1, 1, 1, R, x, 0x090, NC , ALWAYS )
GMM_FORMAT( B4G4R4A4_UNORM , 16, 1, 1, 1, R, x, 0x104, 0xA , ALWAYS )
GMM_FORMAT( B4G4R4A4_UNORM_SRGB , 16, 1, 1, 1, R, x, 0x105, 0xA , ALWAYS )
GMM_FORMAT( B5G5R5A1_UNORM , 16, 1, 1, 1, R, x, 0x102, 0xA , ALWAYS )
GMM_FORMAT( B5G5R5A1_UNORM_SRGB , 16, 1, 1, 1, R, x, 0x103, 0xA , ALWAYS )
GMM_FORMAT( B5G5R5X1_UNORM , 16, 1, 1, 1, R, x, 0x11A, 0xA , ALWAYS )
GMM_FORMAT( B5G5R5X1_UNORM_SRGB , 16, 1, 1, 1, R, x, 0x11B, 0xA , ALWAYS )
GMM_FORMAT( B5G6R5_UNORM , 16, 1, 1, 1, R, x, 0x100, 0xA , ALWAYS )
GMM_FORMAT( B5G6R5_UNORM_SRGB , 16, 1, 1, 1, R, x, 0x101, 0xA , ALWAYS )
GMM_FORMAT( B8G8R8A8_UNORM , 32, 1, 1, 1, R, x, 0x0C0, 0xA , ALWAYS )
GMM_FORMAT( B8G8R8A8_UNORM_SRGB , 32, 1, 1, 1, R, x, 0x0C1, 0xA , ALWAYS )
GMM_FORMAT( B8G8R8X8_UNORM , 32, 1, 1, 1, R, x, 0x0E9, 0xA , ALWAYS )
GMM_FORMAT( B8G8R8X8_UNORM_SRGB , 32, 1, 1, 1, R, x, 0x0EA, 0xA , ALWAYS )
GMM_FORMAT( B8X8_UNORM_G8R8_SNORM , 32, 1, 1, 1, R, x, 0x0E8, NC , ALWAYS )
GMM_FORMAT( B10G10R10A2_SINT , 32, 1, 1, 1, R, x, 0x1BB, 0x18, GEN(8) )
GMM_FORMAT( B10G10R10A2_SNORM , 32, 1, 1, 1, R, x, 0x1B7, 0x18, GEN(8) )
GMM_FORMAT( B10G10R10A2_SSCALED , 32, 1, 1, 1, R, x, 0x1B9, 0x18, GEN(8) )
GMM_FORMAT( B10G10R10A2_UINT , 32, 1, 1, 1, R, x, 0x1BA, 0x18, GEN(8) )
GMM_FORMAT( B10G10R10A2_UNORM , 32, 1, 1, 1, R, x, 0x0D1, 0x18, ALWAYS )
GMM_FORMAT( B10G10R10A2_UNORM_SRGB , 32, 1, 1, 1, R, x, 0x0D2, 0x18, ALWAYS )
GMM_FORMAT( B10G10R10A2_USCALED , 32, 1, 1, 1, R, x, 0x1B8, 0x18, GEN(8) )
GMM_FORMAT( B10G10R10X2_UNORM , 32, 1, 1, 1, R, x, 0x0EE, 0x18, ALWAYS )
GMM_FORMAT( BC1_UNORM , 64, 4, 4, 1, x, x, 0x186, NC , ALWAYS )
GMM_FORMAT( BC1_UNORM_SRGB , 64, 4, 4, 1, x, x, 0x18B, NC , ALWAYS )
GMM_FORMAT( BC2_UNORM , 128, 4, 4, 1, x, x, 0x187, NC , ALWAYS )
GMM_FORMAT( BC2_UNORM_SRGB , 128, 4, 4, 1, x, x, 0x18C, NC , ALWAYS )
GMM_FORMAT( BC3_UNORM , 128, 4, 4, 1, x, x, 0x188, NC , ALWAYS )
GMM_FORMAT( BC3_UNORM_SRGB , 128, 4, 4, 1, x, x, 0x18D, NC , ALWAYS )
GMM_FORMAT( BC4_SNORM , 64, 4, 4, 1, x, x, 0x199, NC , ALWAYS )
GMM_FORMAT( BC4_UNORM , 64, 4, 4, 1, x, x, 0x189, NC , ALWAYS )
GMM_FORMAT( BC5_SNORM , 128, 4, 4, 1, x, x, 0x19A, NC , ALWAYS )
GMM_FORMAT( BC5_UNORM , 128, 4, 4, 1, x, x, 0x18A, NC , ALWAYS )
GMM_FORMAT( BC6H_SF16 , 128, 4, 4, 1, x, x, 0x1A1, NC , GEN(7) )
GMM_FORMAT( BC6H_UF16 , 128, 4, 4, 1, x, x, 0x1A4, NC , GEN(7) )
GMM_FORMAT( BC7_UNORM , 128, 4, 4, 1, x, x, 0x1A2, NC , GEN(7) )
GMM_FORMAT( BC7_UNORM_SRGB , 128, 4, 4, 1, x, x, 0x1A3, NC , GEN(7) )
GMM_FORMAT( DXT1_RGB , 64, 4, 4, 1, x, x, 0x191, NC , ALWAYS )
GMM_FORMAT( DXT1_RGB_SRGB , 64, 4, 4, 1, x, x, 0x180, NC , ALWAYS )
GMM_FORMAT( EAC_R11 , 64, 4, 4, 1, x, x, 0x1AB, NC , GEN(8) || VLV2 )
GMM_FORMAT( EAC_RG11 , 128, 4, 4, 1, x, x, 0x1AC, NC , GEN(8) || VLV2 )
GMM_FORMAT( EAC_SIGNED_R11 , 64, 4, 4, 1, x, x, 0x1AD, NC , GEN(8) || VLV2 )
GMM_FORMAT( EAC_SIGNED_RG11 , 128, 4, 4, 1, x, x, 0x1AE, NC , GEN(8) || VLV2 )
GMM_FORMAT( ETC1_RGB8 , 64, 4, 4, 1, x, x, 0x1A9, NC , GEN(8) || VLV2 )
GMM_FORMAT( ETC2_EAC_RGBA8 , 128, 4, 4, 1, x, x, 0x1C2, NC , GEN(8) || VLV2 )
GMM_FORMAT( ETC2_EAC_SRGB8_A8 , 128, 4, 4, 1, x, x, 0x1C3, NC , GEN(8) || VLV2 )
GMM_FORMAT( ETC2_RGB8 , 64, 4, 4, 1, x, x, 0x1AA, NC , GEN(8) || VLV2 )
GMM_FORMAT( ETC2_RGB8_PTA , 64, 4, 4, 1, x, x, 0x1C0, NC , GEN(8) || VLV2 )
GMM_FORMAT( ETC2_SRGB8 , 64, 4, 4, 1, x, x, 0x1AF, NC , GEN(8) || VLV2 )
GMM_FORMAT( ETC2_SRGB8_PTA , 64, 4, 4, 1, x, x, 0x1C1, NC , GEN(8) || VLV2 )
GMM_FORMAT( FXT1 , 128, 8, 4, 1, x, x, 0x192, NC , ALWAYS )
GMM_FORMAT( I8_SINT , 8, 1, 1, 1, R, x, 0x155, NC , GEN(9) )
GMM_FORMAT( I8_UINT , 8, 1, 1, 1, R, x, 0x154, NC , GEN(9) )
GMM_FORMAT( I8_UNORM , 8, 1, 1, 1, R, x, 0x145, NC , ALWAYS )
GMM_FORMAT( I16_FLOAT , 16, 1, 1, 1, R, x, 0x115, NC , ALWAYS )
GMM_FORMAT( I16_UNORM , 16, 1, 1, 1, R, x, 0x111, NC , ALWAYS )
GMM_FORMAT( I24X8_UNORM , 32, 1, 1, 1, R, x, 0x0E0, NC , ALWAYS )
GMM_FORMAT( I32_FLOAT , 32, 1, 1, 1, R, x, 0x0E3, NC , ALWAYS )
GMM_FORMAT( I32X32_FLOAT , 64, 1, 1, 1, R, x, 0x092, NC , ALWAYS )
GMM_FORMAT( L8_SINT , 8, 1, 1, 1, R, x, 0x153, NC , GEN(9) )
GMM_FORMAT( L8_UINT , 8, 1, 1, 1, R, x, 0x152, NC , GEN(9) )
GMM_FORMAT( L8_UNORM , 8, 1, 1, 1, R, x, 0x146, NC , ALWAYS )
GMM_FORMAT( L8_UNORM_SRGB , 8, 1, 1, 1, R, x, 0x14C, NC , ALWAYS )
GMM_FORMAT( L8A8_SINT , 16, 1, 1, 1, R, x, 0x127, NC , GEN(9) )
GMM_FORMAT( L8A8_UINT , 16, 1, 1, 1, R, x, 0x126, NC , GEN(9) )
GMM_FORMAT( L8A8_UNORM , 16, 1, 1, 1, R, x, 0x114, NC , ALWAYS )
GMM_FORMAT( L8A8_UNORM_SRGB , 16, 1, 1, 1, R, x, 0x118, NC , ALWAYS )
GMM_FORMAT( L16_FLOAT , 16, 1, 1, 1, R, x, 0x116, NC , ALWAYS )
GMM_FORMAT( L16_UNORM , 16, 1, 1, 1, R, x, 0x112, NC , ALWAYS )
GMM_FORMAT( L16A16_FLOAT , 32, 1, 1, 1, R, x, 0x0F0, NC , ALWAYS )
GMM_FORMAT( L16A16_UNORM , 32, 1, 1, 1, R, x, 0x0DF, NC , ALWAYS )
GMM_FORMAT( L24X8_UNORM , 32, 1, 1, 1, R, x, 0x0E1, NC , ALWAYS )
GMM_FORMAT( L32_FLOAT , 32, 1, 1, 1, R, x, 0x0E4, NC , ALWAYS )
GMM_FORMAT( L32_UNORM , 32, 1, 1, 1, R, x, 0x0DD, NC , ALWAYS )
GMM_FORMAT( L32A32_FLOAT , 64, 1, 1, 1, R, x, 0x08A, NC , ALWAYS )
GMM_FORMAT( L32X32_FLOAT , 64, 1, 1, 1, R, x, 0x091, NC , ALWAYS )
GMM_FORMAT( MONO8 , 1, 1, 1, 1, R, x, 0x18E, NC , x ) // No current GMM support by this name.
GMM_FORMAT( P2_UNORM_PALETTE0 , 2, 1, 1, 1, R, x, 0x184, NC , x ) // No current GMM support by this name.
GMM_FORMAT( P2_UNORM_PALETTE1 , 2, 1, 1, 1, R, x, 0x185, NC , x ) // "
GMM_FORMAT( P4A4_UNORM_PALETTE0 , 8, 1, 1, 1, R, x, 0x147, NC , ALWAYS )
GMM_FORMAT( P4A4_UNORM_PALETTE1 , 8, 1, 1, 1, R, x, 0x14E, NC , ALWAYS )
GMM_FORMAT( P8_UNORM_PALETTE0 , 8, 1, 1, 1, R, x, 0x14B, NC , ALWAYS )
GMM_FORMAT( P8_UNORM_PALETTE1 , 8, 1, 1, 1, R, x, 0x14D, NC , ALWAYS )
GMM_FORMAT( P8A8_UNORM_PALETTE0 , 16, 1, 1, 1, R, x, 0x122, NC , ALWAYS )
GMM_FORMAT( P8A8_UNORM_PALETTE1 , 16, 1, 1, 1, R, x, 0x123, NC , ALWAYS )
GMM_FORMAT( PLANAR_420_8 , 8, 1, 1, 1, R, x, 0x1A5, NC , x ) // No current GMM support by this name.
GMM_FORMAT( PLANAR_420_16 , 16, 1, 1, 1, R, x, 0x1A6, NC , x ) // "
GMM_FORMAT( PLANAR_422_8 , 8, 1, 1, 1, R, x, 0x00F, NC , x ) // <-- TODO(Minor): Remove this HW-internal format.
GMM_FORMAT( R1_UNORM , 1, 1, 1, 1, R, x, 0x181, NC , x ) // "
GMM_FORMAT( R8_SINT , 8, 1, 1, 1, R, x, 0x142, 0xA , ALWAYS )
GMM_FORMAT( R8_SNORM , 8, 1, 1, 1, R, x, 0x141, 0xA , ALWAYS )
GMM_FORMAT( R8_SSCALED , 8, 1, 1, 1, R, x, 0x149, 0xA , ALWAYS )
GMM_FORMAT( R8_UINT , 8, 1, 1, 1, R, x, 0x143, 0xA , ALWAYS )
GMM_FORMAT( R8_UNORM , 8, 1, 1, 1, R, x, 0x140, 0xA , ALWAYS )
GMM_FORMAT( R8_USCALED , 8, 1, 1, 1, R, x, 0x14A, 0xA , ALWAYS )
GMM_FORMAT( R8G8_SINT , 16, 1, 1, 1, R, x, 0x108, 0xA , ALWAYS )
GMM_FORMAT( R8G8_SNORM , 16, 1, 1, 1, R, x, 0x107, 0xA , ALWAYS )
GMM_FORMAT( R8G8_SSCALED , 16, 1, 1, 1, R, x, 0x11C, 0xA , ALWAYS )
GMM_FORMAT( R8G8_UINT , 16, 1, 1, 1, R, x, 0x109, 0xA , ALWAYS )
GMM_FORMAT( R8G8_UNORM , 16, 1, 1, 1, R, x, 0x106, 0xA , ALWAYS )
GMM_FORMAT( R8G8_USCALED , 16, 1, 1, 1, R, x, 0x11D, 0xA , ALWAYS )
GMM_FORMAT( R8G8B8_SINT , 24, 1, 1, 1, R, x, 0x1C9, NC , GEN(8) )
GMM_FORMAT( R8G8B8_SNORM , 24, 1, 1, 1, R, x, 0x194, NC , ALWAYS )
GMM_FORMAT( R8G8B8_SSCALED , 24, 1, 1, 1, R, x, 0x195, NC , ALWAYS )
GMM_FORMAT( R8G8B8_UINT , 24, 1, 1, 1, R, x, 0x1C8, NC , GEN(8) || VLV2 )
GMM_FORMAT( R8G8B8_UNORM , 24, 1, 1, 1, R, x, 0x193, NC , ALWAYS )
GMM_FORMAT( R8G8B8_UNORM_SRGB , 24, 1, 1, 1, R, x, 0x1A8, NC , GEN(7_5) )
GMM_FORMAT( R8G8B8_USCALED , 24, 1, 1, 1, R, x, 0x196, NC , ALWAYS )
GMM_FORMAT( R8G8B8A8_SINT , 32, 1, 1, 1, R, x, 0x0CA, 0xA , ALWAYS )
GMM_FORMAT( R8G8B8A8_SNORM , 32, 1, 1, 1, R, x, 0x0C9, 0xA , ALWAYS )
GMM_FORMAT( R8G8B8A8_SSCALED , 32, 1, 1, 1, R, x, 0x0F4, 0xA , ALWAYS )
GMM_FORMAT( R8G8B8A8_UINT , 32, 1, 1, 1, R, x, 0x0CB, 0xA , ALWAYS )
GMM_FORMAT( R8G8B8A8_UNORM , 32, 1, 1, 1, R, x, 0x0C7, 0xA , ALWAYS )
GMM_FORMAT( R8G8B8A8_UNORM_SRGB , 32, 1, 1, 1, R, x, 0x0C8, 0xA , ALWAYS )
GMM_FORMAT( R8G8B8A8_USCALED , 32, 1, 1, 1, R, x, 0x0F5, 0xA , ALWAYS )
GMM_FORMAT( R8G8B8X8_UNORM , 32, 1, 1, 1, R, x, 0x0EB, 0xA , ALWAYS )
GMM_FORMAT( R8G8B8X8_UNORM_SRGB , 32, 1, 1, 1, R, x, 0x0EC, 0xA , ALWAYS )
GMM_FORMAT( R9G9B9E5_SHAREDEXP , 32, 1, 1, 1, R, x, 0x0ED, NC , ALWAYS )
GMM_FORMAT( R10G10B10_SNORM_A2_UNORM , 32, 1, 1, 1, R, x, 0x0C5, 0x18, ALWAYS )
GMM_FORMAT( R10G10B10A2_SINT , 32, 1, 1, 1, R, x, 0x1B6, 0x18, GEN(8) )
GMM_FORMAT( R10G10B10A2_SNORM , 32, 1, 1, 1, R, x, 0x1B3, 0x18, GEN(8) )
GMM_FORMAT( R10G10B10A2_SSCALED , 32, 1, 1, 1, R, x, 0x1B5, 0x18, GEN(8) )
GMM_FORMAT( R10G10B10A2_UINT , 32, 1, 1, 1, R, x, 0x0C4, 0x18, ALWAYS )
GMM_FORMAT( R10G10B10A2_UNORM , 32, 1, 1, 1, R, x, 0x0C2, 0x18, ALWAYS )
GMM_FORMAT( R10G10B10A2_UNORM_SRGB , 32, 1, 1, 1, R, x, 0x0C3, 0x18, ALWAYS )
GMM_FORMAT( R10G10B10A2_USCALED , 32, 1, 1, 1, R, x, 0x1B4, 0x18, GEN(8) )
GMM_FORMAT( R10G10B10X2_USCALED , 32, 1, 1, 1, R, x, 0x0F3, 0x18, ALWAYS )
GMM_FORMAT( R11G11B10_FLOAT , 32, 1, 1, 1, R, x, 0x0D3, 0x1E, ALWAYS )
GMM_FORMAT( R16_FLOAT , 16, 1, 1, 1, R, x, 0x10E, 0x10, ALWAYS )
GMM_FORMAT( R16_SINT , 16, 1, 1, 1, R, x, 0x10C, 0x10, ALWAYS )
GMM_FORMAT( R16_SNORM , 16, 1, 1, 1, R, x, 0x10B, 0x10, ALWAYS )
GMM_FORMAT( R16_SSCALED , 16, 1, 1, 1, R, x, 0x11E, 0x10, ALWAYS )
GMM_FORMAT( R16_UINT , 16, 1, 1, 1, R, x, 0x10D, 0x10, ALWAYS )
GMM_FORMAT( R16_UNORM , 16, 1, 1, 1, R, x, 0x10A, 0x10, ALWAYS )
GMM_FORMAT( R16_USCALED , 16, 1, 1, 1, R, x, 0x11F, 0x10, ALWAYS )
GMM_FORMAT( R16G16_FLOAT , 32, 1, 1, 1, R, x, 0x0D0, 0x10, ALWAYS )
GMM_FORMAT( R16G16_SINT , 32, 1, 1, 1, R, x, 0x0CE, 0x10, ALWAYS )
GMM_FORMAT( R16G16_SNORM , 32, 1, 1, 1, R, x, 0x0CD, 0x10, ALWAYS )
GMM_FORMAT( R16G16_SSCALED , 32, 1, 1, 1, R, x, 0x0F6, 0x10, ALWAYS )
GMM_FORMAT( R16G16_UINT , 32, 1, 1, 1, R, x, 0x0CF, 0x10, ALWAYS )
GMM_FORMAT( R16G16_UNORM , 32, 1, 1, 1, R, x, 0x0CC, 0x10, ALWAYS )
GMM_FORMAT( R16G16_USCALED , 32, 1, 1, 1, R, x, 0x0F7, 0x10, ALWAYS )
GMM_FORMAT( R16G16B16_FLOAT , 48, 1, 1, 1, R, x, 0x19B, NC , ALWAYS )
GMM_FORMAT( R16G16B16_SINT , 48, 1, 1, 1, R, x, 0x1B1, NC , GEN(8) )
GMM_FORMAT( R16G16B16_SNORM , 48, 1, 1, 1, R, x, 0x19D, NC , ALWAYS )
GMM_FORMAT( R16G16B16_SSCALED , 48, 1, 1, 1, R, x, 0x19E, NC , ALWAYS )
GMM_FORMAT( R16G16B16_UINT , 48, 1, 1, 1, R, x, 0x1B0, NC , GEN(8) || VLV2 )
GMM_FORMAT( R16G16B16_UNORM , 48, 1, 1, 1, R, x, 0x19C, NC , ALWAYS )
GMM_FORMAT( R16G16B16_USCALED , 48, 1, 1, 1, R, x, 0x19F, NC , ALWAYS )
GMM_FORMAT( R16G16B16A16_FLOAT , 64, 1, 1, 1, R, x, 0x084, 0x10, ALWAYS )
GMM_FORMAT( R16G16B16A16_SINT , 64, 1, 1, 1, R, x, 0x082, 0x10, ALWAYS )
GMM_FORMAT( R16G16B16A16_SNORM , 64, 1, 1, 1, R, x, 0x081, 0x10, ALWAYS )
GMM_FORMAT( R16G16B16A16_SSCALED , 64, 1, 1, 1, R, x, 0x093, 0x10, ALWAYS )
GMM_FORMAT( R16G16B16A16_UINT , 64, 1, 1, 1, R, x, 0x083, 0x10, ALWAYS )
GMM_FORMAT( R16G16B16A16_UNORM , 64, 1, 1, 1, R, x, 0x080, 0x10, ALWAYS )
GMM_FORMAT( R16G16B16A16_USCALED , 64, 1, 1, 1, R, x, 0x094, 0x10, ALWAYS )
GMM_FORMAT( R16G16B16X16_FLOAT , 64, 1, 1, 1, R, x, 0x08F, 0x10, ALWAYS )
GMM_FORMAT( R16G16B16X16_UNORM , 64, 1, 1, 1, R, x, 0x08E, 0x10, ALWAYS )
GMM_FORMAT( R24_UNORM_X8_TYPELESS , 32, 1, 1, 1, R, x, 0x0D9, 0x11, ALWAYS )
GMM_FORMAT( R32_FLOAT , 32, 1, 1, 1, R, x, 0x0D8, 0x11, ALWAYS )
GMM_FORMAT( R32_FLOAT_X8X24_TYPELESS , 64, 1, 1, 1, R, x, 0x088, 0x11, ALWAYS )
GMM_FORMAT( R32_SFIXED , 32, 1, 1, 1, R, x, 0x1B2, 0x11, GEN(8) )
GMM_FORMAT( R32_SINT , 32, 1, 1, 1, R, x, 0x0D6, 0x11, ALWAYS )
GMM_FORMAT( R32_SNORM , 32, 1, 1, 1, R, x, 0x0F2, 0x11, ALWAYS )
GMM_FORMAT( R32_SSCALED , 32, 1, 1, 1, R, x, 0x0F8, 0x11, ALWAYS )
GMM_FORMAT( R32_UINT , 32, 1, 1, 1, R, x, 0x0D7, 0x11, ALWAYS )
GMM_FORMAT( R32_UNORM , 32, 1, 1, 1, R, x, 0x0F1, 0x11, ALWAYS )
GMM_FORMAT( R32_USCALED , 32, 1, 1, 1, R, x, 0x0F9, 0x11, ALWAYS )
GMM_FORMAT( R32G32_FLOAT , 64, 1, 1, 1, R, x, 0x085, 0x11, ALWAYS )
GMM_FORMAT( R32G32_SFIXED , 64, 1, 1, 1, R, x, 0x0A0, 0x11, ALWAYS )
GMM_FORMAT( R32G32_SINT , 64, 1, 1, 1, R, x, 0x086, 0x11, ALWAYS )
GMM_FORMAT( R32G32_SNORM , 64, 1, 1, 1, R, x, 0x08C, 0x11, ALWAYS )
GMM_FORMAT( R32G32_SSCALED , 64, 1, 1, 1, R, x, 0x095, 0x11, ALWAYS )
GMM_FORMAT( R32G32_UINT , 64, 1, 1, 1, R, x, 0x087, 0x11, ALWAYS )
GMM_FORMAT( R32G32_UNORM , 64, 1, 1, 1, R, x, 0x08B, 0x11, ALWAYS )
GMM_FORMAT( R32G32_USCALED , 64, 1, 1, 1, R, x, 0x096, 0x11, ALWAYS )
GMM_FORMAT( R32G32B32_FLOAT , 96, 1, 1, 1, R, x, 0x040, NC , ALWAYS )
GMM_FORMAT( R32G32B32_SFIXED , 96, 1, 1, 1, R, x, 0x050, NC , ALWAYS )
GMM_FORMAT( R32G32B32_SINT , 96, 1, 1, 1, R, x, 0x041, NC , ALWAYS )
GMM_FORMAT( R32G32B32_SNORM , 96, 1, 1, 1, R, x, 0x044, NC , ALWAYS )
GMM_FORMAT( R32G32B32_SSCALED , 96, 1, 1, 1, R, x, 0x045, NC , ALWAYS )
GMM_FORMAT( R32G32B32_UINT , 96, 1, 1, 1, R, x, 0x042, NC , ALWAYS )
GMM_FORMAT( R32G32B32_UNORM , 96, 1, 1, 1, R, x, 0x043, NC , ALWAYS )
GMM_FORMAT( R32G32B32_USCALED , 96, 1, 1, 1, R, x, 0x046, NC , ALWAYS )
GMM_FORMAT( R32G32B32A32_FLOAT , 128, 1, 1, 1, R, x, 0x000, 0x11, ALWAYS )
GMM_FORMAT( R32G32B32A32_SFIXED , 128, 1, 1, 1, R, x, 0x020, 0x11, ALWAYS )
GMM_FORMAT( R32G32B32A32_SINT , 128, 1, 1, 1, R, x, 0x001, 0x11, ALWAYS )
GMM_FORMAT( R32G32B32A32_SNORM , 128, 1, 1, 1, R, x, 0x004, 0x11, ALWAYS )
GMM_FORMAT( R32G32B32A32_SSCALED , 128, 1, 1, 1, R, x, 0x007, 0x11, ALWAYS )
GMM_FORMAT( R32G32B32A32_UINT , 128, 1, 1, 1, R, x, 0x002, 0x11, ALWAYS )
GMM_FORMAT( R32G32B32A32_UNORM , 128, 1, 1, 1, R, x, 0x003, 0x11, ALWAYS )
GMM_FORMAT( R32G32B32A32_USCALED , 128, 1, 1, 1, R, x, 0x008, 0x11, ALWAYS )
GMM_FORMAT( R32G32B32X32_FLOAT , 128, 1, 1, 1, R, x, 0x006, 0x11, ALWAYS )
GMM_FORMAT( R5G5_SNORM_B6_UNORM , 16, 1, 1, 1, R, x, 0x119, NC , ALWAYS )
GMM_FORMAT( R64_FLOAT , 64, 1, 1, 1, R, x, 0x08D, NC , ALWAYS )
GMM_FORMAT( R64_PASSTHRU , 64, 1, 1, 1, R, x, 0x0A1, NC , ALWAYS )
GMM_FORMAT( R64G64_FLOAT , 128, 1, 1, 1, R, x, 0x005, NC , ALWAYS )
GMM_FORMAT( R64G64_PASSTHRU , 128, 1, 1, 1, R, x, 0x021, NC , ALWAYS )
GMM_FORMAT( R64G64B64_FLOAT , 192, 1, 1, 1, R, x, 0x198, NC , ALWAYS )
GMM_FORMAT( R64G64B64_PASSTHRU , 192, 1, 1, 1, R, x, 0x1BD, NC , GEN(8) )
GMM_FORMAT( R64G64B64A64_FLOAT , 256, 1, 1, 1, R, x, 0x197, NC , ALWAYS )
GMM_FORMAT( R64G64B64A64_PASSTHRU , 256, 1, 1, 1, R, x, 0x1BC, NC , GEN(8) )
GMM_FORMAT( RAW , 8, 1, 1, 1, R, x, 0x1FF, NC , GEN(7) ) // "8bpp" for current GMM implementation.
GMM_FORMAT( X24_TYPELESS_G8_UINT , 32, 1, 1, 1, R, x, 0x0DA, 0xA , ALWAYS )
GMM_FORMAT( X32_TYPELESS_G8X24_UINT , 64, 1, 1, 1, R, x, 0x089, 0xA , ALWAYS )
GMM_FORMAT( X8B8_UNORM_G8R8_SNORM , 32, 1, 1, 1, R, x, 0x0E6, NC , ALWAYS )
GMM_FORMAT( Y8_UNORM , 8, 1, 1, 1, R, x, 0x150, 0xF , ALWAYS )
GMM_FORMAT( YCRCB_NORMAL , 16, 1, 1, 1, R, x, 0x182, 0x3 , ALWAYS )
GMM_FORMAT( YCRCB_SWAPUV , 16, 1, 1, 1, R, x, 0x18F, 0xC , ALWAYS )
GMM_FORMAT( YCRCB_SWAPUVY , 16, 1, 1, 1, R, x, 0x183, 0xD , ALWAYS )
GMM_FORMAT( YCRCB_SWAPY , 16, 1, 1, 1, R, x, 0x190, 0xB , ALWAYS )
GMM_FORMAT( A1B5G5R5_UNORM , 16, 1, 1, 1, R, x, 0x124, FC(3, x, RGB5A1, , ), GEN(8) || VLV2 )
GMM_FORMAT( A4B4G4R4_UNORM , 16, 1, 1, 1, R, x, 0x125, FC(3, x, RGB5A1, , ), GEN(8) )
GMM_FORMAT( A4P4_UNORM_PALETTE0 , 8, 1, 1, 1, R, x, 0x148, NC , ALWAYS )
GMM_FORMAT( A4P4_UNORM_PALETTE1 , 8, 1, 1, 1, R, x, 0x14F, NC , ALWAYS )
GMM_FORMAT( A8_UNORM , 8, 1, 1, 1, R, x, 0x144, FC(3, 8, R, 8, U), GEN(7) )
GMM_FORMAT( A8P8_UNORM_PALETTE0 , 16, 1, 1, 1, R, x, 0x10F, NC , ALWAYS )
GMM_FORMAT( A8P8_UNORM_PALETTE1 , 16, 1, 1, 1, R, x, 0x110, NC , ALWAYS )
GMM_FORMAT( A8X8_UNORM_G8R8_SNORM , 32, 1, 1, 1, R, x, 0x0E7, NC , ALWAYS )
GMM_FORMAT( A16_FLOAT , 16, 1, 1, 1, R, x, 0x117, NC , GEN(7) )
GMM_FORMAT( A16_UNORM , 16, 1, 1, 1, R, x, 0x113, NC , GEN(7) )
GMM_FORMAT( A24X8_UNORM , 32, 1, 1, 1, R, x, 0x0E2, NC , GEN(7) )
GMM_FORMAT( A32_FLOAT , 32, 1, 1, 1, R, x, 0x0E5, NC , GEN(7) )
GMM_FORMAT( A32_UNORM , 32, 1, 1, 1, R, x, 0x0DE, NC , GEN(7) )
GMM_FORMAT( A32X32_FLOAT , 64, 1, 1, 1, R, x, 0x090, NC , ALWAYS )
GMM_FORMAT( B4G4R4A4_UNORM , 16, 1, 1, 1, R, x, 0x104, FC(3, x, RGBA4, , ), ALWAYS )
GMM_FORMAT( B4G4R4A4_UNORM_SRGB , 16, 1, 1, 1, R, x, 0x105, FC(3, x, RGBA4, , ), ALWAYS )
GMM_FORMAT( B5G5R5A1_UNORM , 16, 1, 1, 1, R, x, 0x102, FC(3, x, RGB5A1, , ), ALWAYS )
GMM_FORMAT( B5G5R5A1_UNORM_SRGB , 16, 1, 1, 1, R, x, 0x103, FC(3, x, RGB5A1, , ), ALWAYS )
GMM_FORMAT( B5G5R5X1_UNORM , 16, 1, 1, 1, R, x, 0x11A, FC(3, x, RGB5A1, , ), ALWAYS )
GMM_FORMAT( B5G5R5X1_UNORM_SRGB , 16, 1, 1, 1, R, x, 0x11B, FC(3, x, RGB5A1, , ), ALWAYS )
GMM_FORMAT( B5G6R5_UNORM , 16, 1, 1, 1, R, x, 0x100, FC(3, x, B5G6R5, , ), ALWAYS )
GMM_FORMAT( B5G6R5_UNORM_SRGB , 16, 1, 1, 1, R, x, 0x101, FC(3, x, B5G6R5, , ), ALWAYS )
GMM_FORMAT( B8G8R8A8_UNORM , 32, 1, 1, 1, R, x, 0x0C0, FC(3, 8, RGBA, 8, U), ALWAYS )
GMM_FORMAT( B8G8R8A8_UNORM_SRGB , 32, 1, 1, 1, R, x, 0x0C1, FC(3, 8, RGBA, 8, U), ALWAYS )
GMM_FORMAT( B8G8R8X8_UNORM , 32, 1, 1, 1, R, x, 0x0E9, FC(3, 8, RGBA, 8, U), ALWAYS )
GMM_FORMAT( B8G8R8X8_UNORM_SRGB , 32, 1, 1, 1, R, x, 0x0EA, FC(3, 8, RGBA, 8, U), ALWAYS )
GMM_FORMAT( B8X8_UNORM_G8R8_SNORM , 32, 1, 1, 1, R, x, 0x0E8, NC , ALWAYS )
GMM_FORMAT( B10G10R10A2_SINT , 32, 1, 1, 1, R, x, 0x1BB, FC(3, x, RGB10A2, , ), GEN(8) )
GMM_FORMAT( B10G10R10A2_SNORM , 32, 1, 1, 1, R, x, 0x1B7, FC(3, x, RGB10A2, , ), GEN(8) )
GMM_FORMAT( B10G10R10A2_SSCALED , 32, 1, 1, 1, R, x, 0x1B9, FC(3, x, RGB10A2, , ), GEN(8) )
GMM_FORMAT( B10G10R10A2_UINT , 32, 1, 1, 1, R, x, 0x1BA, FC(3, x, RGB10A2, , ), GEN(8) )
GMM_FORMAT( B10G10R10A2_UNORM , 32, 1, 1, 1, R, x, 0x0D1, FC(3, x, RGB10A2, , ), ALWAYS )
GMM_FORMAT( B10G10R10A2_UNORM_SRGB , 32, 1, 1, 1, R, x, 0x0D2, FC(3, x, RGB10A2, , ), ALWAYS )
GMM_FORMAT( B10G10R10A2_USCALED , 32, 1, 1, 1, R, x, 0x1B8, FC(3, x, RGB10A2, , ), GEN(8) )
GMM_FORMAT( B10G10R10X2_UNORM , 32, 1, 1, 1, R, x, 0x0EE, FC(3, x, RGB10A2, , ), ALWAYS )
GMM_FORMAT( BC1_UNORM , 64, 4, 4, 1, x, x, 0x186, NC , ALWAYS )
GMM_FORMAT( BC1_UNORM_SRGB , 64, 4, 4, 1, x, x, 0x18B, NC , ALWAYS )
GMM_FORMAT( BC2_UNORM , 128, 4, 4, 1, x, x, 0x187, NC , ALWAYS )
GMM_FORMAT( BC2_UNORM_SRGB , 128, 4, 4, 1, x, x, 0x18C, NC , ALWAYS )
GMM_FORMAT( BC3_UNORM , 128, 4, 4, 1, x, x, 0x188, NC , ALWAYS )
GMM_FORMAT( BC3_UNORM_SRGB , 128, 4, 4, 1, x, x, 0x18D, NC , ALWAYS )
GMM_FORMAT( BC4_SNORM , 64, 4, 4, 1, x, x, 0x199, NC , ALWAYS )
GMM_FORMAT( BC4_UNORM , 64, 4, 4, 1, x, x, 0x189, NC , ALWAYS )
GMM_FORMAT( BC5_SNORM , 128, 4, 4, 1, x, x, 0x19A, NC , ALWAYS )
GMM_FORMAT( BC5_UNORM , 128, 4, 4, 1, x, x, 0x18A, NC , ALWAYS )
GMM_FORMAT( BC6H_SF16 , 128, 4, 4, 1, x, x, 0x1A1, NC , GEN(7) )
GMM_FORMAT( BC6H_UF16 , 128, 4, 4, 1, x, x, 0x1A4, NC , GEN(7) )
GMM_FORMAT( BC7_UNORM , 128, 4, 4, 1, x, x, 0x1A2, NC , GEN(7) )
GMM_FORMAT( BC7_UNORM_SRGB , 128, 4, 4, 1, x, x, 0x1A3, NC , GEN(7) )
GMM_FORMAT( DXT1_RGB , 64, 4, 4, 1, x, x, 0x191, NC , ALWAYS )
GMM_FORMAT( DXT1_RGB_SRGB , 64, 4, 4, 1, x, x, 0x180, NC , ALWAYS )
GMM_FORMAT( EAC_R11 , 64, 4, 4, 1, x, x, 0x1AB, NC , GEN(8) || VLV2 )
GMM_FORMAT( EAC_RG11 , 128, 4, 4, 1, x, x, 0x1AC, NC , GEN(8) || VLV2 )
GMM_FORMAT( EAC_SIGNED_R11 , 64, 4, 4, 1, x, x, 0x1AD, NC , GEN(8) || VLV2 )
GMM_FORMAT( EAC_SIGNED_RG11 , 128, 4, 4, 1, x, x, 0x1AE, NC , GEN(8) || VLV2 )
GMM_FORMAT( ETC1_RGB8 , 64, 4, 4, 1, x, x, 0x1A9, NC , GEN(8) || VLV2 )
GMM_FORMAT( ETC2_EAC_RGBA8 , 128, 4, 4, 1, x, x, 0x1C2, NC , GEN(8) || VLV2 )
GMM_FORMAT( ETC2_EAC_SRGB8_A8 , 128, 4, 4, 1, x, x, 0x1C3, NC , GEN(8) || VLV2 )
GMM_FORMAT( ETC2_RGB8 , 64, 4, 4, 1, x, x, 0x1AA, NC , GEN(8) || VLV2 )
GMM_FORMAT( ETC2_RGB8_PTA , 64, 4, 4, 1, x, x, 0x1C0, NC , GEN(8) || VLV2 )
GMM_FORMAT( ETC2_SRGB8 , 64, 4, 4, 1, x, x, 0x1AF, NC , GEN(8) || VLV2 )
GMM_FORMAT( ETC2_SRGB8_PTA , 64, 4, 4, 1, x, x, 0x1C1, NC , GEN(8) || VLV2 )
GMM_FORMAT( FXT1 , 128, 8, 4, 1, x, x, 0x192, NC , ALWAYS )
GMM_FORMAT( I8_SINT , 8, 1, 1, 1, R, x, 0x155, NC , GEN(9) )
GMM_FORMAT( I8_UINT , 8, 1, 1, 1, R, x, 0x154, NC , GEN(9) )
GMM_FORMAT( I8_UNORM , 8, 1, 1, 1, R, x, 0x145, NC , ALWAYS )
GMM_FORMAT( I16_FLOAT , 16, 1, 1, 1, R, x, 0x115, NC , ALWAYS )
GMM_FORMAT( I16_UNORM , 16, 1, 1, 1, R, x, 0x111, NC , ALWAYS )
GMM_FORMAT( I24X8_UNORM , 32, 1, 1, 1, R, x, 0x0E0, NC , ALWAYS )
GMM_FORMAT( I32_FLOAT , 32, 1, 1, 1, R, x, 0x0E3, NC , ALWAYS )
GMM_FORMAT( I32X32_FLOAT , 64, 1, 1, 1, R, x, 0x092, NC , ALWAYS )
GMM_FORMAT( L8_SINT , 8, 1, 1, 1, R, x, 0x153, NC , GEN(9) )
GMM_FORMAT( L8_UINT , 8, 1, 1, 1, R, x, 0x152, NC , GEN(9) )
GMM_FORMAT( L8_UNORM , 8, 1, 1, 1, R, x, 0x146, NC , ALWAYS )
GMM_FORMAT( L8_UNORM_SRGB , 8, 1, 1, 1, R, x, 0x14C, NC , ALWAYS )
GMM_FORMAT( L8A8_SINT , 16, 1, 1, 1, R, x, 0x127, NC , GEN(9) )
GMM_FORMAT( L8A8_UINT , 16, 1, 1, 1, R, x, 0x126, NC , GEN(9) )
GMM_FORMAT( L8A8_UNORM , 16, 1, 1, 1, R, x, 0x114, NC , ALWAYS )
GMM_FORMAT( L8A8_UNORM_SRGB , 16, 1, 1, 1, R, x, 0x118, NC , ALWAYS )
GMM_FORMAT( L16_FLOAT , 16, 1, 1, 1, R, x, 0x116, NC , ALWAYS )
GMM_FORMAT( L16_UNORM , 16, 1, 1, 1, R, x, 0x112, NC , ALWAYS )
GMM_FORMAT( L16A16_FLOAT , 32, 1, 1, 1, R, x, 0x0F0, NC , ALWAYS )
GMM_FORMAT( L16A16_UNORM , 32, 1, 1, 1, R, x, 0x0DF, NC , ALWAYS )
GMM_FORMAT( L24X8_UNORM , 32, 1, 1, 1, R, x, 0x0E1, NC , ALWAYS )
GMM_FORMAT( L32_FLOAT , 32, 1, 1, 1, R, x, 0x0E4, NC , ALWAYS )
GMM_FORMAT( L32_UNORM , 32, 1, 1, 1, R, x, 0x0DD, NC , ALWAYS )
GMM_FORMAT( L32A32_FLOAT , 64, 1, 1, 1, R, x, 0x08A, NC , ALWAYS )
GMM_FORMAT( L32X32_FLOAT , 64, 1, 1, 1, R, x, 0x091, NC , ALWAYS )
GMM_FORMAT( MONO8 , 1, 1, 1, 1, R, x, 0x18E, NC , x ) // No current GMM support by this name.
GMM_FORMAT( P2_UNORM_PALETTE0 , 2, 1, 1, 1, R, x, 0x184, NC , x ) // No current GMM support by this name.
GMM_FORMAT( P2_UNORM_PALETTE1 , 2, 1, 1, 1, R, x, 0x185, NC , x ) // "
GMM_FORMAT( P4A4_UNORM_PALETTE0 , 8, 1, 1, 1, R, x, 0x147, NC , ALWAYS )
GMM_FORMAT( P4A4_UNORM_PALETTE1 , 8, 1, 1, 1, R, x, 0x14E, NC , ALWAYS )
GMM_FORMAT( P8_UNORM_PALETTE0 , 8, 1, 1, 1, R, x, 0x14B, NC , ALWAYS )
GMM_FORMAT( P8_UNORM_PALETTE1 , 8, 1, 1, 1, R, x, 0x14D, NC , ALWAYS )
GMM_FORMAT( P8A8_UNORM_PALETTE0 , 16, 1, 1, 1, R, x, 0x122, NC , ALWAYS )
GMM_FORMAT( P8A8_UNORM_PALETTE1 , 16, 1, 1, 1, R, x, 0x123, NC , ALWAYS )
GMM_FORMAT( PACKED_422_16 , 64, 2, 1, 1, R, x, 0x1A7, NC , GEN(12) )
GMM_FORMAT( PLANAR_420_8 , 8, 1, 1, 1, R, x, 0x1A5, NC , x ) // No current GMM support by this name.
GMM_FORMAT( PLANAR_420_16 , 16, 1, 1, 1, R, x, 0x1A6, NC , x ) // "
GMM_FORMAT( PLANAR_422_8 , 8, 1, 1, 1, R, x, 0x00F, NC , x ) // <-- TODO(Minor): Remove this HW-internal format.
GMM_FORMAT( R1_UNORM , 1, 1, 1, 1, R, x, 0x181, NC , x ) // "
GMM_FORMAT( R8_SINT , 8, 1, 1, 1, R, x, 0x142, FC(3, 8, R, 8, S1), ALWAYS )
GMM_FORMAT( R8_SNORM , 8, 1, 1, 1, R, x, 0x141, FC(3, 8, R, 8, S), ALWAYS )
GMM_FORMAT( R8_SSCALED , 8, 1, 1, 1, R, x, 0x149, FC(3, 8, R, 8, S), ALWAYS )
GMM_FORMAT( R8_UINT , 8, 1, 1, 1, R, x, 0x143, FC(3, 8, R, 8, U1), ALWAYS )
GMM_FORMAT( R8_UNORM , 8, 1, 1, 1, R, x, 0x140, FC(3, 8, R, 8, U), ALWAYS )
GMM_FORMAT( R8_USCALED , 8, 1, 1, 1, R, x, 0x14A, FC(3, 8, R, 8, U), ALWAYS )
GMM_FORMAT( R8G8_SINT , 16, 1, 1, 1, R, x, 0x108, FC(3, 8, RG, 8, S), ALWAYS )
GMM_FORMAT( R8G8_SNORM , 16, 1, 1, 1, R, x, 0x107, FC(3, 8, RG, 8, S), ALWAYS )
GMM_FORMAT( R8G8_SSCALED , 16, 1, 1, 1, R, x, 0x11C, FC(3, 8, RG, 8, S), ALWAYS )
GMM_FORMAT( R8G8_UINT , 16, 1, 1, 1, R, x, 0x109, FC(3, 8, RG, 8, U), ALWAYS )
GMM_FORMAT( R8G8_UNORM , 16, 1, 1, 1, R, x, 0x106, FC(3, 8, RG, 8, U), ALWAYS )
GMM_FORMAT( R8G8_USCALED , 16, 1, 1, 1, R, x, 0x11D, FC(3, 8, RG, 8, U), ALWAYS )
GMM_FORMAT( R8G8B8_SINT , 24, 1, 1, 1, R, x, 0x1C9, NC , GEN(8) )
GMM_FORMAT( R8G8B8_SNORM , 24, 1, 1, 1, R, x, 0x194, NC , ALWAYS )
GMM_FORMAT( R8G8B8_SSCALED , 24, 1, 1, 1, R, x, 0x195, NC , ALWAYS )
GMM_FORMAT( R8G8B8_UINT , 24, 1, 1, 1, R, x, 0x1C8, NC , GEN(8) || VLV2 )
GMM_FORMAT( R8G8B8_UNORM , 24, 1, 1, 1, R, x, 0x193, NC , ALWAYS )
GMM_FORMAT( R8G8B8_UNORM_SRGB , 24, 1, 1, 1, R, x, 0x1A8, NC , GEN(7_5) )
GMM_FORMAT( R8G8B8_USCALED , 24, 1, 1, 1, R, x, 0x196, NC , ALWAYS )
GMM_FORMAT( R8G8B8A8_SINT , 32, 1, 1, 1, R, x, 0x0CA, FC(3, 8, RGBA, 8, S), ALWAYS )
GMM_FORMAT( R8G8B8A8_SNORM , 32, 1, 1, 1, R, x, 0x0C9, FC(3, 8, RGBA, 8, S), ALWAYS )
GMM_FORMAT( R8G8B8A8_SSCALED , 32, 1, 1, 1, R, x, 0x0F4, FC(3, 8, RGBA, 8, S), ALWAYS )
GMM_FORMAT( R8G8B8A8_UINT , 32, 1, 1, 1, R, x, 0x0CB, FC(3, 8, RGBA, 8, U), ALWAYS )
GMM_FORMAT( R8G8B8A8_UNORM , 32, 1, 1, 1, R, x, 0x0C7, FC(3, 8, RGBA, 8, U), ALWAYS )
GMM_FORMAT( R8G8B8A8_UNORM_SRGB , 32, 1, 1, 1, R, x, 0x0C8, FC(3, 8, RGBA, 8, U), ALWAYS )
GMM_FORMAT( R8G8B8A8_USCALED , 32, 1, 1, 1, R, x, 0x0F5, FC(3, 8, RGBA, 8, U), ALWAYS )
GMM_FORMAT( R8G8B8X8_UNORM , 32, 1, 1, 1, R, x, 0x0EB, FC(3, 8, RGBA, 8, U), ALWAYS )
GMM_FORMAT( R8G8B8X8_UNORM_SRGB , 32, 1, 1, 1, R, x, 0x0EC, FC(3, 8, RGBA, 8, U), ALWAYS )
GMM_FORMAT( R9G9B9E5_SHAREDEXP , 32, 1, 1, 1, R, x, 0x0ED, NC , ALWAYS )
GMM_FORMAT( R10G10B10_FLOAT_A2_UNORM , 32, 1, 1, 1, R, x, 0x0D5, FC(3, x, RGB10A2, , ), GEN(12) )
GMM_FORMAT( R10G10B10_SNORM_A2_UNORM , 32, 1, 1, 1, R, x, 0x0C5, FC(3, x, RGB10A2, , ), ALWAYS )
GMM_FORMAT( R10G10B10A2_SINT , 32, 1, 1, 1, R, x, 0x1B6, FC(3, x, RGB10A2, , ), GEN(8) )
GMM_FORMAT( R10G10B10A2_SNORM , 32, 1, 1, 1, R, x, 0x1B3, FC(3, x, RGB10A2, , ), GEN(8) )
GMM_FORMAT( R10G10B10A2_SSCALED , 32, 1, 1, 1, R, x, 0x1B5, FC(3, x, RGB10A2, , ), GEN(8) )
GMM_FORMAT( R10G10B10A2_UINT , 32, 1, 1, 1, R, x, 0x0C4, FC(3, x, RGB10A2, , ), ALWAYS )
GMM_FORMAT( R10G10B10A2_UNORM , 32, 1, 1, 1, R, x, 0x0C2, FC(3, x, RGB10A2, , ), ALWAYS )
GMM_FORMAT( R10G10B10A2_UNORM_SRGB , 32, 1, 1, 1, R, x, 0x0C3, FC(3, x, RGB10A2, , ), ALWAYS )
GMM_FORMAT( R10G10B10A2_USCALED , 32, 1, 1, 1, R, x, 0x1B4, FC(3, x, RGB10A2, , ), GEN(8) )
GMM_FORMAT( R10G10B10X2_USCALED , 32, 1, 1, 1, R, x, 0x0F3, FC(3, x, RGB10A2, , ), ALWAYS )
GMM_FORMAT( R11G11B10_FLOAT , 32, 1, 1, 1, R, x, 0x0D3, FC(3, x, RG11B10, , ), ALWAYS )
GMM_FORMAT( R16_FLOAT , 16, 1, 1, 1, R, x, 0x10E, FC(3, 16, R, 16, F1), ALWAYS )
GMM_FORMAT( R16_SINT , 16, 1, 1, 1, R, x, 0x10C, FC(3, 16, R, 16, S1), ALWAYS )
GMM_FORMAT( R16_SNORM , 16, 1, 1, 1, R, x, 0x10B, FC(3, 16, R, 16, S), ALWAYS )
GMM_FORMAT( R16_SSCALED , 16, 1, 1, 1, R, x, 0x11E, FC(3, 16, R, 16, S), ALWAYS )
GMM_FORMAT( R16_UINT , 16, 1, 1, 1, R, x, 0x10D, FC(3, 16, R, 16, U1), ALWAYS )
GMM_FORMAT( R16_UNORM , 16, 1, 1, 1, R, x, 0x10A, FC(3, 16, R, 16, U), ALWAYS )
GMM_FORMAT( R16_USCALED , 16, 1, 1, 1, R, x, 0x11F, FC(3, 16, R, 16, U), ALWAYS )
GMM_FORMAT( R16G16_FLOAT , 32, 1, 1, 1, R, x, 0x0D0, FC(3, 16, RG, 16, F), ALWAYS )
GMM_FORMAT( R16G16_SINT , 32, 1, 1, 1, R, x, 0x0CE, FC(3, 16, RG, 16, S), ALWAYS )
GMM_FORMAT( R16G16_SNORM , 32, 1, 1, 1, R, x, 0x0CD, FC(3, 16, RG, 16, S), ALWAYS )
GMM_FORMAT( R16G16_SSCALED , 32, 1, 1, 1, R, x, 0x0F6, FC(3, 16, RG, 16, S), ALWAYS )
GMM_FORMAT( R16G16_UINT , 32, 1, 1, 1, R, x, 0x0CF, FC(3, 16, RG, 16, U), ALWAYS )
GMM_FORMAT( R16G16_UNORM , 32, 1, 1, 1, R, x, 0x0CC, FC(3, 16, RG, 16, U), ALWAYS )
GMM_FORMAT( R16G16_USCALED , 32, 1, 1, 1, R, x, 0x0F7, FC(3, 16, RG, 16, U), ALWAYS )
GMM_FORMAT( R16G16B16_FLOAT , 48, 1, 1, 1, R, x, 0x19B, NC , ALWAYS )
GMM_FORMAT( R16G16B16_SINT , 48, 1, 1, 1, R, x, 0x1B1, NC , GEN(8) )
GMM_FORMAT( R16G16B16_SNORM , 48, 1, 1, 1, R, x, 0x19D, NC , ALWAYS )
GMM_FORMAT( R16G16B16_SSCALED , 48, 1, 1, 1, R, x, 0x19E, NC , ALWAYS )
GMM_FORMAT( R16G16B16_UINT , 48, 1, 1, 1, R, x, 0x1B0, NC , GEN(8) || VLV2 )
GMM_FORMAT( R16G16B16_UNORM , 48, 1, 1, 1, R, x, 0x19C, NC , ALWAYS )
GMM_FORMAT( R16G16B16_USCALED , 48, 1, 1, 1, R, x, 0x19F, NC , ALWAYS )
GMM_FORMAT( R16G16B16A16_FLOAT , 64, 1, 1, 1, R, x, 0x084, FC(3, 16, RGBA, 16, F), ALWAYS )
GMM_FORMAT( R16G16B16A16_SINT , 64, 1, 1, 1, R, x, 0x082, FC(3, 16, RGBA, 16, S), ALWAYS )
GMM_FORMAT( R16G16B16A16_SNORM , 64, 1, 1, 1, R, x, 0x081, FC(3, 16, RGBA, 16, S), ALWAYS )
GMM_FORMAT( R16G16B16A16_SSCALED , 64, 1, 1, 1, R, x, 0x093, FC(3, 16, RGBA, 16, S), ALWAYS )
GMM_FORMAT( R16G16B16A16_UINT , 64, 1, 1, 1, R, x, 0x083, FC(3, 16, RGBA, 16, U), ALWAYS )
GMM_FORMAT( R16G16B16A16_UNORM , 64, 1, 1, 1, R, x, 0x080, FC(3, 16, RGBA, 16, U), ALWAYS )
GMM_FORMAT( R16G16B16A16_USCALED , 64, 1, 1, 1, R, x, 0x094, FC(3, 16, RGBA, 16, U), ALWAYS )
GMM_FORMAT( R16G16B16X16_FLOAT , 64, 1, 1, 1, R, x, 0x08F, FC(3, 16, RGBA, 16, F), ALWAYS )
GMM_FORMAT( R16G16B16X16_UNORM , 64, 1, 1, 1, R, x, 0x08E, FC(3, 16, RGBA, 16, U), ALWAYS )
GMM_FORMAT( R24_UNORM_X8_TYPELESS , 32, 1, 1, 1, R, x, 0x0D9, FC(3, 32, R, 32, U1), ALWAYS )
GMM_FORMAT( R32_FLOAT , 32, 1, 1, 1, R, x, 0x0D8, FC(3, 32, R, 32, F1), ALWAYS )
GMM_FORMAT( R32_FLOAT_X8X24_TYPELESS , 64, 1, 1, 1, R, x, 0x088, FC(3, 32, R, 32, F), ALWAYS )
GMM_FORMAT( R32_SFIXED , 32, 1, 1, 1, R, x, 0x1B2, FC(3, 32, R, 32, S), GEN(8) )
GMM_FORMAT( R32_SINT , 32, 1, 1, 1, R, x, 0x0D6, FC(3, 32, R, 32, S1), ALWAYS )
GMM_FORMAT( R32_SNORM , 32, 1, 1, 1, R, x, 0x0F2, FC(3, 32, R, 32, S), ALWAYS )
GMM_FORMAT( R32_SSCALED , 32, 1, 1, 1, R, x, 0x0F8, FC(3, 32, R, 32, S), ALWAYS )
GMM_FORMAT( R32_UINT , 32, 1, 1, 1, R, x, 0x0D7, FC(3, 32, R, 32, U1), ALWAYS )
GMM_FORMAT( R32_UNORM , 32, 1, 1, 1, R, x, 0x0F1, FC(3, 32, R, 32, U), ALWAYS )
GMM_FORMAT( R32_USCALED , 32, 1, 1, 1, R, x, 0x0F9, FC(3, 32, R, 32, U), ALWAYS )
GMM_FORMAT( R32G32_FLOAT , 64, 1, 1, 1, R, x, 0x085, FC(3, 32, RG, 32, F), ALWAYS )
GMM_FORMAT( R32G32_SFIXED , 64, 1, 1, 1, R, x, 0x0A0, FC(3, 32, RG, 32, S), ALWAYS )
GMM_FORMAT( R32G32_SINT , 64, 1, 1, 1, R, x, 0x086, FC(3, 32, RG, 32, S), ALWAYS )
GMM_FORMAT( R32G32_SNORM , 64, 1, 1, 1, R, x, 0x08C, FC(3, 32, RG, 32, S), ALWAYS )
GMM_FORMAT( R32G32_SSCALED , 64, 1, 1, 1, R, x, 0x095, FC(3, 32, RG, 32, S), ALWAYS )
GMM_FORMAT( R32G32_UINT , 64, 1, 1, 1, R, x, 0x087, FC(3, 32, RG, 32, U), ALWAYS )
GMM_FORMAT( R32G32_UNORM , 64, 1, 1, 1, R, x, 0x08B, FC(3, 32, RG, 32, U), ALWAYS )
GMM_FORMAT( R32G32_USCALED , 64, 1, 1, 1, R, x, 0x096, FC(3, 32, RG, 32, U), ALWAYS )
GMM_FORMAT( R32G32B32_FLOAT , 96, 1, 1, 1, R, x, 0x040, NC , ALWAYS )
GMM_FORMAT( R32G32B32_SFIXED , 96, 1, 1, 1, R, x, 0x050, NC , ALWAYS )
GMM_FORMAT( R32G32B32_SINT , 96, 1, 1, 1, R, x, 0x041, NC , ALWAYS )
GMM_FORMAT( R32G32B32_SNORM , 96, 1, 1, 1, R, x, 0x044, NC , ALWAYS )
GMM_FORMAT( R32G32B32_SSCALED , 96, 1, 1, 1, R, x, 0x045, NC , ALWAYS )
GMM_FORMAT( R32G32B32_UINT , 96, 1, 1, 1, R, x, 0x042, NC , ALWAYS )
GMM_FORMAT( R32G32B32_UNORM , 96, 1, 1, 1, R, x, 0x043, NC , ALWAYS )
GMM_FORMAT( R32G32B32_USCALED , 96, 1, 1, 1, R, x, 0x046, NC , ALWAYS )
GMM_FORMAT( R32G32B32A32_FLOAT , 128, 1, 1, 1, R, x, 0x000, FC(3, 32, RGBA, 32, F), ALWAYS )
GMM_FORMAT( R32G32B32A32_SFIXED , 128, 1, 1, 1, R, x, 0x020, FC(3, 32, RGBA, 32, S), ALWAYS )
GMM_FORMAT( R32G32B32A32_SINT , 128, 1, 1, 1, R, x, 0x001, FC(3, 32, RGBA, 32, S), ALWAYS )
GMM_FORMAT( R32G32B32A32_SNORM , 128, 1, 1, 1, R, x, 0x004, FC(3, 32, RGBA, 32, S), ALWAYS )
GMM_FORMAT( R32G32B32A32_SSCALED , 128, 1, 1, 1, R, x, 0x007, FC(3, 32, RGBA, 32, S), ALWAYS )
GMM_FORMAT( R32G32B32A32_UINT , 128, 1, 1, 1, R, x, 0x002, FC(3, 32, RGBA, 32, U), ALWAYS )
GMM_FORMAT( R32G32B32A32_UNORM , 128, 1, 1, 1, R, x, 0x003, FC(3, 32, RGBA, 32, U), ALWAYS )
GMM_FORMAT( R32G32B32A32_USCALED , 128, 1, 1, 1, R, x, 0x008, FC(3, 32, RGBA, 32, U), ALWAYS )
GMM_FORMAT( R32G32B32X32_FLOAT , 128, 1, 1, 1, R, x, 0x006, FC(3, 32, RGBA, 32, F), ALWAYS )
GMM_FORMAT( R5G5_SNORM_B6_UNORM , 16, 1, 1, 1, R, x, 0x119, NC , ALWAYS )
GMM_FORMAT( R64_FLOAT , 64, 1, 1, 1, R, x, 0x08D, NC , ALWAYS )
GMM_FORMAT( R64_PASSTHRU , 64, 1, 1, 1, R, x, 0x0A1, NC , ALWAYS )
GMM_FORMAT( R64G64_FLOAT , 128, 1, 1, 1, R, x, 0x005, NC , ALWAYS )
GMM_FORMAT( R64G64_PASSTHRU , 128, 1, 1, 1, R, x, 0x021, NC , ALWAYS )
GMM_FORMAT( R64G64B64_FLOAT , 192, 1, 1, 1, R, x, 0x198, NC , ALWAYS )
GMM_FORMAT( R64G64B64_PASSTHRU , 192, 1, 1, 1, R, x, 0x1BD, NC , GEN(8) )
GMM_FORMAT( R64G64B64A64_FLOAT , 256, 1, 1, 1, R, x, 0x197, NC , ALWAYS )
GMM_FORMAT( R64G64B64A64_PASSTHRU , 256, 1, 1, 1, R, x, 0x1BC, NC , GEN(8) )
GMM_FORMAT( RAW , 8, 1, 1, 1, R, x, 0x1FF, NC , GEN(7) ) // "8bpp" for current GMM implementation.
GMM_FORMAT( X24_TYPELESS_G8_UINT , 32, 1, 1, 1, R, x, 0x0DA, FC(3, 32, R, 32, U1), ALWAYS )
GMM_FORMAT( X32_TYPELESS_G8X24_UINT , 64, 1, 1, 1, R, x, 0x089, FC(3, 32, RG, 32, U), ALWAYS )
GMM_FORMAT( X8B8_UNORM_G8R8_SNORM , 32, 1, 1, 1, R, x, 0x0E6, NC , ALWAYS )
GMM_FORMAT( Y8_UNORM , 8, 1, 1, 1, R, x, 0x150, FC(2, x, NV12, , ), ALWAYS )
GMM_FORMAT( YCRCB_NORMAL , 16, 1, 1, 1, R, x, 0x182, FC(2, x, YUY2, , ), ALWAYS )
GMM_FORMAT( YCRCB_SWAPUV , 16, 1, 1, 1, R, x, 0x18F, FC(2, x, YCRCB_SWAPUV, ,), ALWAYS )
GMM_FORMAT( YCRCB_SWAPUVY , 16, 1, 1, 1, R, x, 0x183, FC(2, x, YCRCB_SWAPUVY,,), ALWAYS )
GMM_FORMAT( YCRCB_SWAPY , 16, 1, 1, 1, R, x, 0x190, FC(2, x, YCRCB_SWAPY, , ), ALWAYS )
#endif // INCLUDE_SURFACESTATE_FORMATS
#ifdef INCLUDE_ASTC_FORMATS
GMM_FORMAT( ASTC_FULL_2D_4x4_FLT16 , 128, 4, 4, 1, x, A, 0x140, NC , ASTC_HDR_2D )
GMM_FORMAT( ASTC_FULL_2D_5x4_FLT16 , 128, 5, 4, 1, x, A, 0x148, NC , ASTC_HDR_2D )
GMM_FORMAT( ASTC_FULL_2D_5x5_FLT16 , 128, 5, 5, 1, x, A, 0x149, NC , ASTC_HDR_2D )
GMM_FORMAT( ASTC_FULL_2D_6x5_FLT16 , 128, 6, 5, 1, x, A, 0x151, NC , ASTC_HDR_2D )
GMM_FORMAT( ASTC_FULL_2D_6x6_FLT16 , 128, 6, 6, 1, x, A, 0x152, NC , ASTC_HDR_2D )
GMM_FORMAT( ASTC_FULL_2D_8x5_FLT16 , 128, 8, 5, 1, x, A, 0x161, NC , ASTC_HDR_2D )
GMM_FORMAT( ASTC_FULL_2D_8x6_FLT16 , 128, 8, 6, 1, x, A, 0x162, NC , ASTC_HDR_2D )
GMM_FORMAT( ASTC_FULL_2D_8x8_FLT16 , 128, 8, 8, 1, x, A, 0x164, NC , ASTC_HDR_2D )
GMM_FORMAT( ASTC_FULL_2D_10x5_FLT16 , 128, 10, 5, 1, x, A, 0x171, NC , ASTC_HDR_2D )
GMM_FORMAT( ASTC_FULL_2D_10x6_FLT16 , 128, 10, 6, 1, x, A, 0x172, NC , ASTC_HDR_2D )
GMM_FORMAT( ASTC_FULL_2D_10x8_FLT16 , 128, 10, 8, 1, x, A, 0x174, NC , ASTC_HDR_2D )
GMM_FORMAT( ASTC_FULL_2D_10x10_FLT16 , 128, 10, 10, 1, x, A, 0x176, NC , ASTC_HDR_2D )
GMM_FORMAT( ASTC_FULL_2D_12x10_FLT16 , 128, 12, 10, 1, x, A, 0x17e, NC , ASTC_HDR_2D )
GMM_FORMAT( ASTC_FULL_2D_12x12_FLT16 , 128, 12, 12, 1, x, A, 0x17f, NC , ASTC_HDR_2D )
GMM_FORMAT( ASTC_FULL_3D_3x3x3_FLT16 , 128, 3, 3, 3, x, A, 0x1c0, NC , ASTC_3D )
GMM_FORMAT( ASTC_FULL_3D_4x3x3_FLT16 , 128, 4, 3, 3, x, A, 0x1d0, NC , ASTC_3D )
GMM_FORMAT( ASTC_FULL_3D_4x4x3_FLT16 , 128, 4, 4, 3, x, A, 0x1d4, NC , ASTC_3D )
GMM_FORMAT( ASTC_FULL_3D_4x4x4_FLT16 , 128, 4, 4, 4, x, A, 0x1d5, NC , ASTC_3D )
GMM_FORMAT( ASTC_FULL_3D_5x4x4_FLT16 , 128, 5, 4, 4, x, A, 0x1e5, NC , ASTC_3D )
GMM_FORMAT( ASTC_FULL_3D_5x5x4_FLT16 , 128, 5, 5, 4, x, A, 0x1e9, NC , ASTC_3D )
GMM_FORMAT( ASTC_FULL_3D_5x5x5_FLT16 , 128, 5, 5, 5, x, A, 0x1ea, NC , ASTC_3D )
GMM_FORMAT( ASTC_FULL_3D_6x5x5_FLT16 , 128, 6, 5, 5, x, A, 0x1fa, NC , ASTC_3D )
GMM_FORMAT( ASTC_FULL_3D_6x6x5_FLT16 , 128, 6, 6, 5, x, A, 0x1fe, NC , ASTC_3D )
GMM_FORMAT( ASTC_FULL_3D_6x6x6_FLT16 , 128, 6, 6, 6, x, A, 0x1ff, NC , ASTC_3D )
GMM_FORMAT( ASTC_LDR_2D_4x4_FLT16 , 128, 4, 4, 1, x, A, 0x040, NC , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_4x4_U8sRGB , 128, 4, 4, 1, x, A, 0x000, NC , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_5x4_FLT16 , 128, 5, 4, 1, x, A, 0x048, NC , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_5x4_U8sRGB , 128, 5, 4, 1, x, A, 0x008, NC , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_5x5_FLT16 , 128, 5, 5, 1, x, A, 0x049, NC , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_5x5_U8sRGB , 128, 5, 5, 1, x, A, 0x009, NC , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_6x5_FLT16 , 128, 6, 5, 1, x, A, 0x051, NC , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_6x5_U8sRGB , 128, 6, 5, 1, x, A, 0x011, NC , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_6x6_FLT16 , 128, 6, 6, 1, x, A, 0x052, NC , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_6x6_U8sRGB , 128, 6, 6, 1, x, A, 0x012, NC , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_8x5_FLT16 , 128, 8, 5, 1, x, A, 0x061, NC , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_8x5_U8sRGB , 128, 8, 5, 1, x, A, 0x021, NC , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_8x6_FLT16 , 128, 8, 6, 1, x, A, 0x062, NC , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_8x6_U8sRGB , 128, 8, 6, 1, x, A, 0x022, NC , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_8x8_FLT16 , 128, 8, 8, 1, x, A, 0x064, NC , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_8x8_U8sRGB , 128, 8, 8, 1, x, A, 0x024, NC , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_10x5_FLT16 , 128, 10, 5, 1, x, A, 0x071, NC , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_10x5_U8sRGB , 128, 10, 5, 1, x, A, 0x031, NC , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_10x6_FLT16 , 128, 10, 6, 1, x, A, 0x072, NC , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_10x6_U8sRGB , 128, 10, 6, 1, x, A, 0x032, NC , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_10x8_FLT16 , 128, 10, 8, 1, x, A, 0x074, NC , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_10x8_U8sRGB , 128, 10, 8, 1, x, A, 0x034, NC , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_10x10_FLT16 , 128, 10, 10, 1, x, A, 0x076, NC , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_10x10_U8sRGB , 128, 10, 10, 1, x, A, 0x036, NC , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_12x10_FLT16 , 128, 12, 10, 1, x, A, 0x07e, NC , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_12x10_U8sRGB , 128, 12, 10, 1, x, A, 0x03e, NC , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_12x12_FLT16 , 128, 12, 12, 1, x, A, 0x07f, NC , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_12x12_U8sRGB , 128, 12, 12, 1, x, A, 0x03f, NC , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_3D_3x3x3_U8sRGB , 128, 3, 3, 3, x, A, 0x080, NC , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_3x3x3_FLT16 , 128, 3, 3, 3, x, A, 0x0c0, NC , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_4x3x3_U8sRGB , 128, 4, 3, 3, x, A, 0x090, NC , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_4x3x3_FLT16 , 128, 4, 3, 3, x, A, 0x0d0, NC , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_4x4x3_U8sRGB , 128, 4, 4, 3, x, A, 0x094, NC , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_4x4x3_FLT16 , 128, 4, 4, 3, x, A, 0x0d4, NC , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_4x4x4_U8sRGB , 128, 4, 4, 4, x, A, 0x095, NC , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_4x4x4_FLT16 , 128, 4, 4, 4, x, A, 0x0d5, NC , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_5x4x4_U8sRGB , 128, 5, 4, 4, x, A, 0x0a5, NC , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_5x4x4_FLT16 , 128, 5, 4, 4, x, A, 0x0e5, NC , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_5x5x4_U8sRGB , 128, 5, 5, 4, x, A, 0x0a9, NC , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_5x5x4_FLT16 , 128, 5, 5, 4, x, A, 0x0e9, NC , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_5x5x5_U8sRGB , 128, 5, 5, 5, x, A, 0x0aa, NC , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_5x5x5_FLT16 , 128, 5, 5, 5, x, A, 0x0ea, NC , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_6x5x5_U8sRGB , 128, 6, 5, 5, x, A, 0x0ba, NC , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_6x5x5_FLT16 , 128, 6, 5, 5, x, A, 0x0fa, NC , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_6x6x5_U8sRGB , 128, 6, 6, 5, x, A, 0x0be, NC , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_6x6x5_FLT16 , 128, 6, 6, 5, x, A, 0x0fe, NC , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_6x6x6_U8sRGB , 128, 6, 6, 6, x, A, 0x0bf, NC , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_6x6x6_FLT16 , 128, 6, 6, 6, x, A, 0x0ff, NC , ASTC_3D )
#endif // INCLUDE_ASTC_FORMATS
#ifdef INCLUDE_MISC_FORMATS
GMM_FORMAT( AUYV , 32, 1, 1, 1, R, x, NA , NC , ALWAYS )
GMM_FORMAT( AYUV , 32, 1, 1, 1, R, x, NA , 0x9 , ALWAYS )
GMM_FORMAT( BAYER_BGGR8 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS ) // (0, 0) = B
GMM_FORMAT( BAYER_BGGR16 , 16, 1, 1, 1, R, x, NA , NC , ALWAYS ) // (0, 0) = B
GMM_FORMAT( BAYER_GBRG8 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS ) // (0, 0) = G, (1, 0) = B
GMM_FORMAT( BAYER_GBRG16 , 16, 1, 1, 1, R, x, NA , NC , ALWAYS ) // (0, 0) = G, (1, 0) = B
GMM_FORMAT( BAYER_GRBG8 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS ) // (0, 0) = G, (1, 0) = R
GMM_FORMAT( BAYER_GRBG16 , 16, 1, 1, 1, R, x, NA , NC , ALWAYS ) // (0, 0) = G, (1, 0) = R
GMM_FORMAT( BAYER_RGGB8 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS ) // (0, 0) = R
GMM_FORMAT( BAYER_RGGB16 , 16, 1, 1, 1, R, x, NA , NC , ALWAYS ) // (0, 0) = R
GMM_FORMAT( BC1 , 64, 4, 4, 1, x, x, NA , NC , ALWAYS ) // Legacy GMM name for related HW format.
GMM_FORMAT( BC2 , 128, 4, 4, 1, x, x, NA , NC , ALWAYS ) // "
GMM_FORMAT( BC3 , 128, 4, 4, 1, x, x, NA , NC , ALWAYS ) // "
GMM_FORMAT( BC4 , 64, 4, 4, 1, x, x, NA , NC , ALWAYS ) // "
GMM_FORMAT( BC5 , 128, 4, 4, 1, x, x, NA , NC , ALWAYS ) // "
GMM_FORMAT( BC6 , 128, 4, 4, 1, x, x, NA , NC , ALWAYS ) // "
GMM_FORMAT( BC6H , 128, 4, 4, 1, x, x, NA , NC , ALWAYS ) // "
GMM_FORMAT( BC7 , 128, 4, 4, 1, x, x, NA , NC , GEN(7) ) // "
GMM_FORMAT( BGRP , 8, 1, 1, 1, R, x, NA , NC , ALWAYS ) // FOURCC:BGRP
GMM_FORMAT( D16_UNORM , 16, 1, 1, 1, x, x, NA , 0x10, ALWAYS ) //Depth uses color format L1e.En
GMM_FORMAT( D24_UNORM_X8_UINT , 32, 1, 1, 1, x, x, NA , 0x11, ALWAYS )
GMM_FORMAT( D32_FLOAT , 32, 1, 1, 1, x, x, NA , 0x11, ALWAYS )
GMM_FORMAT( DXT1 , 64, 4, 4, 1, x, x, NA , NC , ALWAYS ) // Legacy GMM name for related HW format.
GMM_FORMAT( DXT2_5 , 128, 4, 4, 1, x, x, NA , NC , ALWAYS ) // "
GMM_FORMAT( ETC1 , 64, 4, 4, 1, x, x, NA , NC , GEN(8) || VLV2 ) // "
GMM_FORMAT( ETC2 , 64, 4, 4, 1, x, x, NA , NC , GEN(8) || VLV2 ) // "
GMM_FORMAT( ETC2_EAC , 128, 4, 4, 1, x, x, NA , NC , GEN(8) || VLV2 ) // "
GMM_FORMAT( GENERIC_8BIT , 8, 1, 1, 1, x, x, NA , NC , ALWAYS )
GMM_FORMAT( GENERIC_16BIT , 16, 1, 1, 1, x, x, NA , NC , ALWAYS )
GMM_FORMAT( GENERIC_24BIT , 24, 1, 1, 1, x, x, NA , NC , ALWAYS )
GMM_FORMAT( GENERIC_32BIT , 32, 1, 1, 1, x, x, NA , NC , ALWAYS )
GMM_FORMAT( GENERIC_48BIT , 48, 1, 1, 1, x, x, NA , NC , ALWAYS )
GMM_FORMAT( GENERIC_64BIT , 64, 1, 1, 1, x, x, NA , NC , ALWAYS )
GMM_FORMAT( GENERIC_96BIT , 96, 1, 1, 1, x, x, NA , NC , ALWAYS )
GMM_FORMAT( GENERIC_128BIT , 128, 1, 1, 1, x, x, NA , NC , ALWAYS )
GMM_FORMAT( GENERIC_192BIT , 192, 1, 1, 1, x, x, NA , NC , GEN(8) )
GMM_FORMAT( GENERIC_256BIT , 256, 1, 1, 1, x, x, NA , NC , GEN(8) )
GMM_FORMAT( I420 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS ) // Same as IYUV.
GMM_FORMAT( IYUV , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
GMM_FORMAT( IMC1 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
GMM_FORMAT( IMC2 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
GMM_FORMAT( IMC3 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
GMM_FORMAT( IMC4 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
GMM_FORMAT( L4A4 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
GMM_FORMAT( MFX_JPEG_YUV411 , 8, 1, 1, 1, R, x, NA , 0xF , GEN(7) )
GMM_FORMAT( MFX_JPEG_YUV411R , 8, 1, 1, 1, R, x, NA , 0xF , GEN(7) )
GMM_FORMAT( MFX_JPEG_YUV420 , 8, 1, 1, 1, R, x, NA , 0xF , GEN(7) ) // Same as IMC3.
GMM_FORMAT( MFX_JPEG_YUV422H , 8, 1, 1, 1, R, x, NA , 0xF , GEN(7) )
GMM_FORMAT( MFX_JPEG_YUV422V , 8, 1, 1, 1, R, x, NA , 0xF , GEN(7) )
GMM_FORMAT( MFX_JPEG_YUV444 , 8, 1, 1, 1, R, x, NA , 0xF , GEN(7) )
GMM_FORMAT( NV11 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
GMM_FORMAT( NV12 , 8, 1, 1, 1, R, x, NA , 0xF , ALWAYS )
GMM_FORMAT( NV21 , 8, 1, 1, 1, R, x, NA , 0xF , ALWAYS )
GMM_FORMAT( P8 , 8, 1, 1, 1, R, x, NA, NC , ALWAYS )
GMM_FORMAT( P010 , 16, 1, 1, 1, R, x, NA , 0x7 , ALWAYS )
GMM_FORMAT( P012 , 16, 1, 1, 1, R, x, NA , NC , ALWAYS )
GMM_FORMAT( P016 , 16, 1, 1, 1, R, x, NA , 0x8 , ALWAYS )
GMM_FORMAT( P208 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
GMM_FORMAT( R10G10B10_XR_BIAS_A2_UNORM , 32, 1, 1, 1, x, x, NA , 0x18, ALWAYS ) // DXGI_FORMAT_R10G10B10_XR_BIAS_A2_UNORM
GMM_FORMAT( R24G8_TYPELESS , 32, 1, 1, 1, x, x, NA , 0x11, ALWAYS ) // DXGI_FORMAT_R24G8_TYPELESS (To differentiate between GENERIC_32BIT.)
GMM_FORMAT( R32G8X24_TYPELESS , 64, 1, 1, 1, x, x, NA , 0x11, ALWAYS ) // DXGI_FORMAT_R32G8X24_TYPELESS (To differentiate between GENERIC_64BIT.)
GMM_FORMAT( RENDER_8BIT , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
GMM_FORMAT( RGBP , 8, 1, 1, 1, R, x, NA , 0xF , ALWAYS ) // FOURCC:RGBP
GMM_FORMAT( Y1_UNORM , 1, 1, 1, 1, x, x, NA , NC , GEN(8) )
GMM_FORMAT( Y8_UNORM_VA , 8, 1, 1, 1, x, x, NA , 0xF , GEN(8) )
GMM_FORMAT( Y16_SNORM , 16, 1, 1, 1, x, x, NA , 0x7 , GEN(8) )
GMM_FORMAT( Y16_UNORM , 16, 1, 1, 1, x, x, NA , 0x7 , GEN(8) )
GMM_FORMAT( AUYV , 32, 1, 1, 1, R, x, NA , NC , ALWAYS )
GMM_FORMAT( AYUV , 32, 1, 1, 1, R, x, NA , FC(2, x, AYUV, , ), ALWAYS )
GMM_FORMAT( BAYER_BGGR8 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS ) // (0, 0) = B
GMM_FORMAT( BAYER_BGGR16 , 16, 1, 1, 1, R, x, NA , NC , ALWAYS ) // (0, 0) = B
GMM_FORMAT( BAYER_GBRG8 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS ) // (0, 0) = G, (1, 0) = B
GMM_FORMAT( BAYER_GBRG16 , 16, 1, 1, 1, R, x, NA , NC , ALWAYS ) // (0, 0) = G, (1, 0) = B
GMM_FORMAT( BAYER_GRBG8 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS ) // (0, 0) = G, (1, 0) = R
GMM_FORMAT( BAYER_GRBG16 , 16, 1, 1, 1, R, x, NA , NC , ALWAYS ) // (0, 0) = G, (1, 0) = R
GMM_FORMAT( BAYER_RGGB8 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS ) // (0, 0) = R
GMM_FORMAT( BAYER_RGGB16 , 16, 1, 1, 1, R, x, NA , NC , ALWAYS ) // (0, 0) = R
GMM_FORMAT( BC1 , 64, 4, 4, 1, x, x, NA , NC , ALWAYS ) // Legacy GMM name for related HW format.
GMM_FORMAT( BC2 , 128, 4, 4, 1, x, x, NA , NC , ALWAYS ) // "
GMM_FORMAT( BC3 , 128, 4, 4, 1, x, x, NA , NC , ALWAYS ) // "
GMM_FORMAT( BC4 , 64, 4, 4, 1, x, x, NA , NC , ALWAYS ) // "
GMM_FORMAT( BC5 , 128, 4, 4, 1, x, x, NA , NC , ALWAYS ) // "
GMM_FORMAT( BC6 , 128, 4, 4, 1, x, x, NA , NC , ALWAYS ) // "
GMM_FORMAT( BC6H , 128, 4, 4, 1, x, x, NA , NC , ALWAYS ) // "
GMM_FORMAT( BC7 , 128, 4, 4, 1, x, x, NA , NC , GEN(7) ) // "
GMM_FORMAT( BGRP , 8, 1, 1, 1, R, x, NA , NC , ALWAYS ) // FOURCC:BGRP
GMM_FORMAT( D16_UNORM , 16, 1, 1, 1, x, x, NA , FC(3, 16, R, 16, U), ALWAYS ) //Depth uses color format L1e.En
GMM_FORMAT( D24_UNORM_X8_UINT , 32, 1, 1, 1, x, x, NA , FC(3, 32, R, 32, U1), ALWAYS )
GMM_FORMAT( D32_FLOAT , 32, 1, 1, 1, x, x, NA , FC(3, 32, R, 32, F1), ALWAYS )
GMM_FORMAT( DXT1 , 64, 4, 4, 1, x, x, NA , NC , ALWAYS ) // Legacy GMM name for related HW format.
GMM_FORMAT( DXT2_5 , 128, 4, 4, 1, x, x, NA , NC , ALWAYS ) // "
GMM_FORMAT( ETC1 , 64, 4, 4, 1, x, x, NA , NC , GEN(8) || VLV2 ) // "
GMM_FORMAT( ETC2 , 64, 4, 4, 1, x, x, NA , NC , GEN(8) || VLV2 ) // "
GMM_FORMAT( ETC2_EAC , 128, 4, 4, 1, x, x, NA , NC , GEN(8) || VLV2 ) // "
GMM_FORMAT( GENERIC_8BIT , 8, 1, 1, 1, x, x, NA , NC , ALWAYS )
GMM_FORMAT( GENERIC_16BIT , 16, 1, 1, 1, x, x, NA , NC , ALWAYS )
GMM_FORMAT( GENERIC_24BIT , 24, 1, 1, 1, x, x, NA , NC , ALWAYS )
GMM_FORMAT( GENERIC_32BIT , 32, 1, 1, 1, x, x, NA , NC , ALWAYS )
GMM_FORMAT( GENERIC_48BIT , 48, 1, 1, 1, x, x, NA , NC , ALWAYS )
GMM_FORMAT( GENERIC_64BIT , 64, 1, 1, 1, x, x, NA , NC , ALWAYS )
GMM_FORMAT( GENERIC_96BIT , 96, 1, 1, 1, x, x, NA , NC , ALWAYS )
GMM_FORMAT( GENERIC_128BIT , 128, 1, 1, 1, x, x, NA , NC , ALWAYS )
GMM_FORMAT( GENERIC_192BIT , 192, 1, 1, 1, x, x, NA , NC , GEN(8) )
GMM_FORMAT( GENERIC_256BIT , 256, 1, 1, 1, x, x, NA , NC , GEN(8) )
GMM_FORMAT( I420 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS ) // Same as IYUV.
GMM_FORMAT( IYUV , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
GMM_FORMAT( IMC1 , 8, 1, 1, 1, R, x, NA , FC(2, x, NV12, , ), ALWAYS )
GMM_FORMAT( IMC2 , 8, 1, 1, 1, R, x, NA , FC(2, x, NV12, , ), ALWAYS )
GMM_FORMAT( IMC3 , 8, 1, 1, 1, R, x, NA , FC(2, x, NV12, , ), ALWAYS )
GMM_FORMAT( IMC4 , 8, 1, 1, 1, R, x, NA , FC(2, x, NV12, , ), ALWAYS )
GMM_FORMAT( L4A4 , 8, 1, 1, 1, R, x, NA , FC(2, x, NV12, , ), ALWAYS ) // A4L4. No HW support.
GMM_FORMAT( MFX_JPEG_YUV411 , 8, 1, 1, 1, R, x, NA , FC(2, x, NV12, , ), GEN(7) )
GMM_FORMAT( MFX_JPEG_YUV411R , 8, 1, 1, 1, R, x, NA , FC(2, x, NV12, , ), GEN(7) )
GMM_FORMAT( MFX_JPEG_YUV420 , 8, 1, 1, 1, R, x, NA , FC(2, x, NV12, , ), GEN(7) ) // Same as IMC3.
GMM_FORMAT( MFX_JPEG_YUV422H , 8, 1, 1, 1, R, x, NA , FC(2, x, NV12, , ), GEN(7) )
GMM_FORMAT( MFX_JPEG_YUV422V , 8, 1, 1, 1, R, x, NA , FC(2, x, NV12, , ), GEN(7) )
GMM_FORMAT( MFX_JPEG_YUV444 , 8, 1, 1, 1, R, x, NA , FC(2, x, NV12, , ), GEN(7) )
GMM_FORMAT( NV11 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
GMM_FORMAT( NV12 , 8, 1, 1, 1, R, x, NA , FC(2, x, NV12, , ), ALWAYS )
GMM_FORMAT( NV21 , 8, 1, 1, 1, R, x, NA , FC(2, x, NV12, , ), ALWAYS )
GMM_FORMAT( P8 , 8, 1, 1, 1, R, x, NA, NC , ALWAYS )
GMM_FORMAT( P010 , 16, 1, 1, 1, R, x, NA , FC(2, x, P010, , ), ALWAYS )
GMM_FORMAT( P012 , 16, 1, 1, 1, R, x, NA , NC , ALWAYS )
GMM_FORMAT( P016 , 16, 1, 1, 1, R, x, NA , FC(2, x, P016, , ), ALWAYS )
GMM_FORMAT( P208 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
GMM_FORMAT( R10G10B10_XR_BIAS_A2_UNORM , 32, 1, 1, 1, x, x, NA , FC(2, x, RGB10A2, , ), ALWAYS ) // DXGI_FORMAT_R10G10B10_XR_BIAS_A2_UNORM
GMM_FORMAT( R24G8_TYPELESS , 32, 1, 1, 1, x, x, NA , FC(2, 32, R, 32, U), ALWAYS ) // DXGI_FORMAT_R24G8_TYPELESS (To differentiate between GENERIC_32BIT.)
GMM_FORMAT( R32G8X24_TYPELESS , 64, 1, 1, 1, x, x, NA , FC(2, 32, R, 32, U), ALWAYS ) // DXGI_FORMAT_R32G8X24_TYPELESS (To differentiate between GENERIC_64BIT.)
GMM_FORMAT( RENDER_8BIT , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
GMM_FORMAT( RGBP , 8, 1, 1, 1, R, x, NA , FC(2, x, NV12, , ), ALWAYS ) // FOURCC:RGBP
GMM_FORMAT( Y1_UNORM , 1, 1, 1, 1, x, x, NA , NC , GEN(8) )
GMM_FORMAT( Y8_UNORM_VA , 8, 1, 1, 1, x, x, NA , FC(2, x, NV12, , ), GEN(8) )
GMM_FORMAT( Y16_SNORM , 16, 1, 1, 1, x, x, NA , FC(2, x, P010, , ), GEN(8) )
GMM_FORMAT( Y16_UNORM , 16, 1, 1, 1, x, x, NA , FC(2, x, P010, , ), GEN(8) )
#if (IGFX_GEN >= IGFX_GEN10)
GMM_FORMAT( Y32_UNORM , 32, 1, 1, 1, x, x, NA , NC , GEN(10) )
GMM_FORMAT( Y32_UNORM , 32, 1, 1, 1, x, x, NA , NC , GEN(10) ) // Y32 was removed from Gen9 but is still referenced; only available on Gen10+
#endif
GMM_FORMAT( Y210 , 64, 2, 1, 1, R, x, NA , 0x5 , GEN(11) ) // Packed 422 10/12/16 bit
GMM_FORMAT( Y212 , 64, 2, 1, 1, R, x, NA , 0x5 , GEN(11) )
GMM_FORMAT( Y410 , 32, 1, 1, 1, R, x, NA , 0x4 , GEN(11) )
GMM_FORMAT( Y412 , 64, 1, 1, 1, R, x, NA , 0x6 , GEN(11) )
GMM_FORMAT( Y216 , 64, 2, 1, 1, R, x, NA, 0x5, ALWAYS )
GMM_FORMAT( Y416 , 64, 1, 1, 1, R, x, NA , 0x6 , ALWAYS ) // Packed 444 10/12/16 bit,
GMM_FORMAT( YV12 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
GMM_FORMAT( YVU9 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
GMM_FORMAT( Y210 , 64, 2, 1, 1, R, x, NA , FC(2, x, Y210, , ), GEN(11) ) // Packed 422 10/12/16 bit
GMM_FORMAT( Y212 , 64, 2, 1, 1, R, x, NA , FC(2, x, Y216, , ), GEN(11) )
GMM_FORMAT( Y410 , 32, 1, 1, 1, R, x, NA , FC(2, x, Y410, , ), GEN(11) )
GMM_FORMAT( Y412 , 64, 1, 1, 1, R, x, NA , FC(2, x, Y416, , ), GEN(11) )
GMM_FORMAT( Y216 , 64, 2, 1, 1, R, x, NA, FC(2, x, Y216, , ), ALWAYS )
GMM_FORMAT( Y416 , 64, 1, 1, 1, R, x, NA , FC(2, x, Y416, , ), ALWAYS ) // Packed 444 10/12/16 bit,
GMM_FORMAT( YV12 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
GMM_FORMAT( YVU9 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
// Implement the packed 4:2:2 YUV formats (UYVY, VYUY, YUY2, YVYU) as compressed block formats by suffixing _2x1 (i.e. 32bpe 2x1-pixel blocks instead of 16bpp 1x1 blocks).
// All OS components (UMDs/KMD) can switch to the *_2x1 style independently of the legacy implementation.
// Refer to GmmCommonExt.h for the legacy implementation of UYVY, VYUY, YUY2, YVYU.
// TODO: Unify them once all OS components switch to the compressed block format.
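// Illustrative example (not part of the original comment): a 1920-wide YUY2 row is
// 1920 pixels x 16bpp = 3840 bytes in the legacy 1x1 view, and 960 blocks x 32bpe = 3840
// bytes in the *_2x1 view -- the memory layout is identical, only the element bookkeeping changes.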
GMM_FORMAT( UYVY_2x1 , 32, 2, 1, 1, R, x, NA , NC , ALWAYS )
GMM_FORMAT( VYUY_2x1 , 32, 2, 1, 1, R, x, NA , NC , ALWAYS )
GMM_FORMAT( YUY2_2x1 , 32, 2, 1, 1, R, x, NA , 0x3 , ALWAYS )
GMM_FORMAT( YVYU_2x1 , 32, 2, 1, 1, R, x, NA , NC , ALWAYS )
GMM_FORMAT( MEDIA_Y1_UNORM , 1, 1, 1, 1, x, x, NA , NC , GEN(8) )
GMM_FORMAT( MEDIA_Y8_UNORM , 8, 1, 1, 1, x, x, NA , 0xF , GEN(8) )
GMM_FORMAT( MEDIA_Y16_SNORM , 16, 1, 1, 1, x, x, NA , 0x7 , GEN(8) )
GMM_FORMAT( MEDIA_Y16_UNORM , 16, 1, 1, 1, x, x, NA , 0x7 , GEN(8) )
GMM_FORMAT( MEDIA_Y32_UNORM , 1, 1, 1, 1, x, x, NA , NC , GEN(8) ) // Y32 is BDW name for SKL Y1, and is 1bpp with 32b granularity
GMM_FORMAT( UYVY_2x1 , 32, 2, 1, 1, R, x, NA , FC(2, x, SWAPY, , ), ALWAYS )
GMM_FORMAT( VYUY_2x1 , 32, 2, 1, 1, R, x, NA , FC(2, x, SWAPUVY, , ), ALWAYS )
GMM_FORMAT( YUY2_2x1 , 32, 2, 1, 1, R, x, NA , FC(2, x, YUY2, , ), ALWAYS )
GMM_FORMAT( YVYU_2x1 , 32, 2, 1, 1, R, x, NA , FC(2, x, SWAPUV, , ), ALWAYS )
GMM_FORMAT( MEDIA_Y1_UNORM , 1, 1, 1, 1, x, x, NA , NC , GEN(8) )
GMM_FORMAT( MEDIA_Y8_UNORM , 8, 1, 1, 1, x, x, NA , FC(2, x, NV12, , ), GEN(8) )
GMM_FORMAT( MEDIA_Y16_SNORM , 16, 1, 1, 1, x, x, NA , FC(2, x, P010, , ), GEN(8) )
GMM_FORMAT( MEDIA_Y16_UNORM , 16, 1, 1, 1, x, x, NA , FC(2, x, P010, , ), GEN(8) )
GMM_FORMAT( MEDIA_Y32_UNORM , 1, 1, 1, 1, x, x, NA , NC , GEN(8) ) // Y32 is BDW name for SKL Y1, and is 1bpp with 32b granularity
GMM_FORMAT( B16G16R16A16_UNORM , 64, 1, 1, 1, R, x, NA , FC(3, 16, RGBA, 16, U), ALWAYS ) // Swapped ARGB16 for media-SFC output
#if _WIN32
GMM_FORMAT( WGBOX_YUV444 , 32, 1, 1, 1, x, x, NA , NC , GEN(9) ) // For testing purposes only.
GMM_FORMAT( WGBOX_PLANAR_YUV444 , 32, 1, 1, 1, x, x, NA , NC , GEN(9) ) // For testing purposes only.
#endif
#endif // INCLUDE_MISC_FORMATS
@ -497,6 +507,7 @@ GMM_FORMAT( WGBOX_PLANAR_YUV444 , 32, 1, 1, 1, x, x, NA , NC ,
#undef INCLUDE_MISC_FORMATS
#undef INCLUDE_SURFACESTATE_FORMATS
#undef NA
#undef NC
#undef R
#undef SKU
#undef VLV2

View File

@ -73,10 +73,16 @@ OTHER DEALINGS IN THE SOFTWARE.
#define GMM_ENABLE_GEN11 0
#endif
#if (!defined(GMM_GFX_GEN) || (GMM_GFX_GEN == 120))
#define GMM_ENABLE_GEN12 1
#else
#define GMM_ENABLE_GEN12 0
#endif
#if (IGFX_GEN >= IGFX_GEN11)
#if !(GMM_ENABLE_GEN8 || GMM_ENABLE_GEN9 || GMM_ENABLE_GEN10 || \
GMM_ENABLE_GEN11)
GMM_ENABLE_GEN11 || GMM_ENABLE_GEN12)
#error "Unrecognized GMM_GFX_GEN !"
#endif
#elif (IGFX_GEN >= IGFX_GEN10)

View File

@ -29,7 +29,7 @@ OTHER DEALINGS IN THE SOFTWARE.
#if defined(_WIN64)
#define GMM_UMD_DLL "igdgmm64.dll"
#else
#define GMM_UMD_DLL "libigdgmm.so.9"
#define GMM_UMD_DLL "libigdgmm.so.10"
#endif
#else
#define GMM_ENTRY_NAME "_OpenGmm@4"
@ -40,6 +40,6 @@ OTHER DEALINGS IN THE SOFTWARE.
#if defined(_WIN32)
#define GMM_UMD_DLL "igdgmm32.dll"
#else
#define GMM_UMD_DLL "libigdgmm.so.9"
#define GMM_UMD_DLL "libigdgmm.so.10"
#endif
#endif

View File

@ -67,7 +67,10 @@ typedef struct GMM_FORMAT_ENTRY_REC
uint8_t Width;
} Element;
GMM_SURFACESTATE_FORMAT SurfaceStateFormat;
uint32_t Reserved;
union {
GMM_E2ECOMP_FORMAT AuxL1eFormat;
uint8_t CompressionFormat;
} CompressionFormat;
}GMM_FORMAT_ENTRY;
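// Illustrative note (not from the original source): both union members alias the same
// storage, so a format-table entry exposes the compression code either as a raw byte or
// as the typed aux-L1e view, e.g.:
//   GMM_FORMAT_ENTRY Entry;                                            // filled from the format table
//   uint8_t            RawCode = Entry.CompressionFormat.CompressionFormat;
//   GMM_E2ECOMP_FORMAT AuxFmt  = Entry.CompressionFormat.AuxL1eFormat; // same bits, typed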
//===========================================================================
@ -267,6 +270,8 @@ typedef struct __GMM_PLATFORM_RESOURCE_REC
uint8_t HiZPixelsPerByte; //HiZ-Bpp is < 1, keep inverse
uint64_t ReconMaxHeight;
uint64_t ReconMaxWidth;
uint8_t NoOfBitsSupported; // No. of bits supported for System physical address on GPU
uint64_t HighestAcceptablePhysicalAddress; // Highest acceptable System physical address
}__GMM_PLATFORM_RESOURCE, GMM_PLATFORM_INFO;
//***************************************************************************

View File

@ -148,6 +148,8 @@ typedef struct GMM_RESOURCE_FLAG_REC
uint32_t __ForceOtherHVALIGN4 : 1;
uint32_t DisableDisplayCcsClearColor : 1; // Disables display clear color
uint32_t DisableDisplayCcsCompression : 1; // Disables display decompression on the surface (it disables display awareness of both fast clear/render compression)
uint32_t PreGen12FastClearOnly : 1; // i.e. AUX_CCS_D (instead of AUX_CCS_E). Flag carried by GMM between UMDs to support shared resources.
uint32_t Reserved : 1; // Reserved
} Wa;
} GMM_RESOURCE_FLAG;

View File

@ -33,16 +33,27 @@ OTHER DEALINGS IN THE SOFTWARE.
#include "External/Common/GmmCommonExt.h"
#include "External/Common/GmmPlatformExt.h"
#include "External/Common/GmmCachePolicy.h"
#include "External/Common/CachePolicy/GmmCachePolicyGen8.h"
#include "External/Common/CachePolicy/GmmCachePolicyGen9.h"
#include "External/Common/CachePolicy/GmmCachePolicyGen10.h"
#include "External/Common/CachePolicy/GmmCachePolicyGen11.h"
#include "External/Common/CachePolicy/GmmCachePolicyGen12.h"
#include "External/Common/GmmResourceInfoExt.h"
#include "../Platform/GmmPlatforms.h"
#include "Platform/GmmGen8Platform.h"
#include "Platform/GmmGen9Platform.h"
#include "Platform/GmmGen10Platform.h"
#include "Platform/GmmGen11Platform.h"
#include "Platform/GmmGen12Platform.h"
#include "External/Common/GmmTextureExt.h"
#include "../Texture/GmmTexture.h"
#include "Texture/GmmTextureCalc.h"
#include "Texture/GmmGen7TextureCalc.h"
#include "Texture/GmmGen8TextureCalc.h"
#include "Texture/GmmGen9TextureCalc.h"
#include "Texture/GmmGen10TextureCalc.h"
#include "Texture/GmmGen11TextureCalc.h"
#include "Texture/GmmGen12TextureCalc.h"
#include "External/Common/GmmResourceInfo.h"
#include "External/Common/GmmInfoExt.h"
#include "External/Common/GmmInfo.h"

View File

@ -0,0 +1,104 @@
/*==============================================================================
Copyright(c) 2019 Intel Corporation
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files(the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and / or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
============================================================================*/
#pragma once
#include "GmmGen11Platform.h"
typedef struct __CCS_UNIT
{
ALIGNMENT Align;
struct
{
// Represents a downscale factor if msb = 0,
// an upscale factor if msb = 1;
// the factor value is absolute (+ve)
int32_t Width;
int32_t Height;
uint32_t Depth; //Depth slices or Samples sharing CCS$line
} Downscale;
} CCS_UNIT;
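// Illustrative reading of the convention above (not from the original source): Downscale.Width = 16
// would mean the auxiliary width is the main-surface width divided by 16; a value with the msb set
// (stored negative) is instead treated as an upscale factor, with the magnitude taken as absolute.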
//Gen12 CCS supported on Yf/Ys 2D/MSAA/3D tiling
#define CCS_MODES (GMM_TILE_MODES - TILE_YF_2D_8bpe)
#define CCS_MODE(x) (x >= TILE_YF_2D_8bpe) ? (x - TILE_YF_2D_8bpe) : CCS_MODES
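// Example of the mapping above (illustrative only): TILE_YF_2D_8bpe maps to index 0 of the
// CCSEx[] table below, TILE_YF_2D_8bpe + n to index n, while any tile mode below
// TILE_YF_2D_8bpe falls through to CCS_MODES, i.e. outside the supported range.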
typedef enum _FC_TileType
{
FC_TILE_Y,
FC_TILE_YF,
FC_TILE_YS,
//max equals last supported plus one
FC_TILE_MAX
} FC_TILE_TYPE;
#define FCTilingType(x) (((x) == LEGACY_TILE_Y) ? (FC_TILE_Y) : \
(((x) >= TILE_YF_2D_8bpe && (x) <= TILE_YF_2D_128bpe) ? (FC_TILE_YF) : \
(((x) >= TILE_YS_2D_8bpe && (x) <= TILE_YS_2D_128bpe) ? (FC_TILE_YS) : \
(FC_TILE_MAX))))
#define FCMaxBppModes 5
#define FCMaxModes FC_TILE_MAX * FCMaxBppModes
#define FCBppMode(bpp) __GmmLog2(bpp) - 3
#define FCMode(TileMode, bpp) (FCTilingType(TileMode) < FC_TILE_MAX) ? (FCTilingType(TileMode) * FCMaxBppModes + FCBppMode(bpp)) : FCMaxModes
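// Worked example (illustrative only): FCMode(TILE_YF_2D_8bpe, 8)
//   FCTilingType(TILE_YF_2D_8bpe) = FC_TILE_YF = 1
//   FCBppMode(8)                  = __GmmLog2(8) - 3 = 0
//   => index = 1 * FCMaxBppModes + 0 = 5, used to index the FCTileMode[FCMaxModes] table below.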
//===========================================================================
// typedef:
// GMM_TEXTURE_ALIGN_EX
//
// Description:
// The following struct extends the texture mip map unit alignment
// required for each map format. The alignment values are platform
// dependent.
//
//---------------------------------------------------------------------------
typedef struct GMM_TEXTURE_ALIGN_EX_REC
{
CCS_UNIT CCSEx[CCS_MODES];
}GMM_TEXTURE_ALIGN_EX;
#ifdef __cplusplus
namespace GmmLib
{
class NON_PAGED_SECTION PlatformInfoGen12 : public PlatformInfoGen11
{
protected:
GMM_TEXTURE_ALIGN_EX TexAlignEx;
CCS_UNIT FCTileMode[FCMaxModes];
public:
PlatformInfoGen12(PLATFORM &Platform);
~PlatformInfoGen12(){};
virtual GMM_TEXTURE_ALIGN_EX GetExTextureAlign() { return TexAlignEx; }
virtual void ApplyExtendedTexAlign(uint32_t CCSMode, ALIGNMENT& UnitAlign);
virtual CCS_UNIT* GetFCRectAlign() { return FCTileMode; }
virtual void SetCCSFlag(GMM_RESOURCE_FLAG &Flags);
virtual uint8_t ValidateMMC(GMM_TEXTURE_INFO &Surf);
virtual uint8_t ValidateCCS(GMM_TEXTURE_INFO &Surf);
virtual uint8_t ValidateUnifiedAuxSurface(GMM_TEXTURE_INFO &Surf);
virtual uint8_t CheckFmtDisplayDecompressible(GMM_TEXTURE_INFO &Surf,
bool IsSupportedRGB64_16_16_16_16,
bool IsSupportedRGB32_8_8_8_8,
bool IsSupportedRGB32_2_10_10_10,
bool IsSupportedMediaFormats);
virtual uint8_t OverrideCompressionFormat(GMM_RESOURCE_FORMAT Format, uint8_t IsMC);
};
}
#endif
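// Hypothetical usage sketch (caller-side names such as Platform and TileMode are assumed, not part of this change):
//   PlatformInfoGen12 PlatformInfo(Platform);                        // PLATFORM &Platform from the caller
//   ALIGNMENT Align = {};
//   PlatformInfo.ApplyExtendedTexAlign(CCS_MODE(TileMode), Align);   // CCS unit alignment for a tile mode
//   CCS_UNIT *pFCAlign = PlatformInfo.GetFCRectAlign();              // FC rect-alignment table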

View File

@ -0,0 +1,105 @@
/*==============================================================================
Copyright(c) 2019 Intel Corporation
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files(the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and / or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
============================================================================*/
#pragma once
#ifdef __cplusplus
#include "GmmGen11TextureCalc.h"
#include "../Platform/GmmGen12Platform.h"
/////////////////////////////////////////////////////////////////////////////////////
/// @file GmmGen12TextureCalc.h
/// @brief This file contains the function and member definitions for texture alloc-
/// ation on all Gen12 platforms.
/////////////////////////////////////////////////////////////////////////////////////
namespace GmmLib
{
/////////////////////////////////////////////////////////////////////////
/// Contains texture calc functions and members for Gen12 platforms.
/// This class is derived from the base GmmTextureCalc class so clients
/// shouldn't have to ever interact with this class directly.
/////////////////////////////////////////////////////////////////////////
class NON_PAGED_SECTION GmmGen12TextureCalc :
public GmmGen11TextureCalc
{
private:
protected:
virtual uint32_t Get2DMipMapHeight(
GMM_TEXTURE_INFO *pTexInfo);
virtual GMM_STATUS FillTexCCS(
GMM_TEXTURE_INFO *pSurf,
GMM_TEXTURE_INFO *pAuxTexInfo);
public:
/* Constructors */
GmmGen12TextureCalc()
{
}
~GmmGen12TextureCalc()
{
}
/* Function prototypes */
virtual GMM_STATUS GMM_STDCALL FillTex2D(GMM_TEXTURE_INFO *pTexInfo,
__GMM_BUFFER_TYPE *pRestrictions);
virtual GMM_STATUS GMM_STDCALL FillTexPlanar(GMM_TEXTURE_INFO *pTexInfo,
__GMM_BUFFER_TYPE *pRestrictions);
virtual GMM_STATUS GMM_STDCALL GetCCSScaleFactor(GMM_TEXTURE_INFO * pTexInfo,
CCS_UNIT& ScaleFactor);
GMM_STATUS GMM_STDCALL GetCCSExMode(GMM_TEXTURE_INFO * AuxSurf);
virtual uint32_t GMM_STDCALL ScaleTextureHeight(GMM_TEXTURE_INFO * pTexInfo,
uint32_t Height);
virtual uint32_t GMM_STDCALL ScaleTextureWidth (GMM_TEXTURE_INFO* pTexInfo,
uint32_t Width);
virtual uint32_t GMM_STDCALL ScaleFCRectHeight(GMM_TEXTURE_INFO * pTexInfo,
uint32_t Height);
virtual uint64_t GMM_STDCALL ScaleFCRectWidth(GMM_TEXTURE_INFO* pTexInfo,
uint64_t Width);
virtual GMM_STATUS GMM_STDCALL MSAACCSUsage(GMM_TEXTURE_INFO *pTexInfo);
virtual void GMM_STDCALL AllocateOneTileThanRequied(GMM_TEXTURE_INFO *pTexInfo,
GMM_GFX_SIZE_T &WidthBytesRender,
GMM_GFX_SIZE_T &WidthBytesPhysical,
GMM_GFX_SIZE_T &WidthBytesLock)
{
GMM_UNREFERENCED_PARAMETER(pTexInfo);
GMM_UNREFERENCED_PARAMETER(WidthBytesRender);
GMM_UNREFERENCED_PARAMETER(WidthBytesPhysical);
GMM_UNREFERENCED_PARAMETER(WidthBytesLock);
}
/* inline functions */
};
}
#endif // #ifdef __cplusplus

View File

@ -35,6 +35,8 @@ OTHER DEALINGS IN THE SOFTWARE.
// Maximums which bound all supported GT
#define GT_MAX_SLICE (4)
#define GT_MAX_SUBSLICE_PER_SLICE (8)
#define GT_MAX_SUBSLICE_PER_DSS (2) // Currently max value based on Gen12
#define GT_MAX_DUALSUBSLICE_PER_SLICE (6) // Currently max value based on Gen12LP
typedef struct GT_SUBSLICE_INFO
{
@ -43,11 +45,19 @@ typedef struct GT_SUBSLICE_INFO
uint32_t EuEnabledMask; // Mask of EUs enabled on this SubSlice
} GT_SUBSLICE_INFO;
typedef struct GT_DUALSUBSLICE_INFO
{
bool Enabled; // Bool to determine if this DSS is enabled.
GT_SUBSLICE_INFO SubSlice[GT_MAX_SUBSLICE_PER_DSS]; // SS details that belong to this DualSubSlice.
} GT_DUALSUBSLICE_INFO;
typedef struct GT_SLICE_INFO
{
bool Enabled; // determine if this slice is enabled.
GT_SUBSLICE_INFO SubSliceInfo[GT_MAX_SUBSLICE_PER_SLICE]; // SS details that belong to this slice.
GT_DUALSUBSLICE_INFO DSSInfo[GT_MAX_DUALSUBSLICE_PER_SLICE]; // DSS details that belong to this slice.
uint32_t SubSliceEnabledCount; // No. of SS enabled in this slice
uint32_t DualSubSliceEnabledCount; // No. of DSS enabled in this slice
} GT_SLICE_INFO;
typedef struct GT_VEBOX_INFO
@ -119,6 +129,25 @@ typedef struct GT_VDBOX_INFO
bool IsValid; // flag to check if VDBoxInfo is valid.
} GT_VDBOX_INFO;
typedef struct GT_CCS_INFO
{
union CCSInstances
{
struct CCSBitStruct
{
uint32_t CCS0Enabled : 1; // To determine if CCS0 is enabled
uint32_t Reserved : 31; // Reserved bits
} Bits;
uint32_t CCSEnableMask; // Union for all CCS instances. It can be used to know which CCS is enabled.
} Instances;
uint32_t NumberOfCCSEnabled; // Number of bits set among bits 0-3 of CCSEnableMask.
bool IsValid; // flag to check if CCSInfo is valid.
} GT_CCS_INFO;
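// Illustrative only: a part with a single compute command streamer fused in would report
// Instances.Bits.CCS0Enabled = 1 (Instances.CCSEnableMask = 0x1), NumberOfCCSEnabled = 1
// (the population count of mask bits 0-3), and IsValid = true once populated.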
typedef struct GT_SQIDI_INFO
{
@ -133,6 +162,7 @@ typedef struct GT_SYSTEM_INFO
uint32_t ThreadCount; // total no of system threads available
uint32_t SliceCount; // Total no. of enabled slices
uint32_t SubSliceCount; // Total no. of enabled subslices
uint32_t DualSubSliceCount; // Total no. of enabled dualsubslices
uint64_t L3CacheSizeInKb; // Total L3 cache size in kilo bytes
uint64_t LLCCacheSizeInKb; // Total LLC cache size in kilo bytes
uint64_t EdramSizeInKb; // Total EDRAM size in kilo bytes
@ -147,6 +177,8 @@ typedef struct GT_SYSTEM_INFO
uint32_t TotalGsThreads; // Total threads in GS
uint32_t TotalPsThreadsWindowerRange; // Total threads in PS Windower Range
uint32_t TotalVsThreads_Pocs; // Total threads in VS for POCS
// Note: The CSR size requirement is not clear at this moment. Until then the driver will set
// the maximum size that should be sufficient for all platform SKUs.
uint32_t CsrSizeInMb; // Total size that driver needs to allocate for CSR.
@ -159,6 +191,7 @@ typedef struct GT_SYSTEM_INFO
uint32_t MaxEuPerSubSlice; // Max available EUs per sub-slice.
uint32_t MaxSlicesSupported; // Max slices this platform can have.
uint32_t MaxSubSlicesSupported; // Max total sub-slices this platform can have (not per slice)
uint32_t MaxDualSubSlicesSupported; // Max total dual sub-slices this platform can have (not per slice)
/*------------------------------------*/
// Flag to determine if hashing is enabled. If enabled then one of the L3 banks will be disabled.
@ -186,11 +219,10 @@ typedef struct GT_SYSTEM_INFO
GT_SQIDI_INFO SqidiInfo;
uint32_t ReservedCCSWays; // Reserved CCS ways provides value of reserved L3 ways for CCS when CCS is enabled.
// This is a hardcoded value as suggested by HW. No MMIO read is needed for same.
GT_CCS_INFO CCSInfo; // CCSInfo provides details(enabled/disabled) of all CCS instances.
} GT_SYSTEM_INFO, *PGT_SYSTEM_INFO;
#pragma pack(pop)
#endif //__GT_SYS_INFO_H__

View File

@ -64,6 +64,7 @@ typedef enum {
IGFX_ICELAKE_LP,
IGFX_LAKEFIELD,
IGFX_ELKHARTLAKE,
IGFX_TIGERLAKE_LP,
IGFX_MAX_PRODUCT,
@ -87,6 +88,7 @@ typedef enum {
PCH_ICP_LP, // ICL LP PCH
PCH_ICP_N, // ICL N PCH
PCH_LKF, // LKF PCH
PCH_TGL_LP, // TGL LP PCH
PCH_CMP_LP, // CML LP PCH
PCH_CMP_H, // CML Halo PCH
PCH_CMP_V, // CML V PCH
@ -111,6 +113,8 @@ typedef enum {
IGFX_GEN10LP_CORE = 14, //Gen10 LP Family
IGFX_GEN11_CORE = 15, //Gen11 Family
IGFX_GEN11LP_CORE = 16, //Gen11 LP Family
IGFX_GEN12_CORE = 17, //Gen12 Family
IGFX_GEN12LP_CORE = 18, //Gen12 LP Family
//Please add new GENs BEFORE THIS !
IGFX_MAX_CORE,
@ -280,6 +284,7 @@ typedef enum __NATIVEGTTYPE
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN9_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN10_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN11_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN12_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GENNEXT_CORE ) )
#define GFX_IS_GEN_5_OR_LATER(p) ( ( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN5_CORE ) || \
@ -292,6 +297,7 @@ typedef enum __NATIVEGTTYPE
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN9_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN10_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN11_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN12_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GENNEXT_CORE ) )
#define GFX_IS_GEN_5_75_OR_LATER(p) ( ( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN5_75_CORE ) || \
@ -302,6 +308,7 @@ typedef enum __NATIVEGTTYPE
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN9_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN10_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN11_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN12_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GENNEXT_CORE ) )
#define GFX_IS_GEN_6_OR_LATER(p) ( ( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN6_CORE ) || \
@ -319,6 +326,7 @@ typedef enum __NATIVEGTTYPE
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN9_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN10_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN11_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN12_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GENNEXT_CORE ) )
#define GFX_IS_GEN_7_5_OR_LATER(p) ( ( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN7_5_CORE ) || \
@ -326,31 +334,38 @@ typedef enum __NATIVEGTTYPE
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN9_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN10_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN11_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN12_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GENNEXT_CORE ) )
#define GFX_IS_GEN_8_OR_LATER(p) ( ( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN8_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN9_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN10_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN11_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN12_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GENNEXT_CORE ) )
#define GFX_IS_GEN_8_CHV_OR_LATER(p) ( ( GFX_GET_CURRENT_PRODUCT(p) == IGFX_CHERRYVIEW ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN9_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN10_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN11_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN12_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GENNEXT_CORE ) )
#define GFX_IS_GEN_9_OR_LATER(p) ( ( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN9_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN10_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN11_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN12_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GENNEXT_CORE ) )
#define GFX_IS_GEN_10_OR_LATER(p) (( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN10_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN11_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN12_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GENNEXT_CORE ) )
#define GFX_IS_GEN_11_OR_LATER(p) (( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN11_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN12_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GENNEXT_CORE ) )
#define GFX_IS_GEN_12_OR_LATER(p) (( GFX_GET_CURRENT_RENDERCORE(p) >= IGFX_GEN12_CORE ))
#define GFX_IS_ATOM_PRODUCT_FAMILY(p) ( GFX_IS_PRODUCT(p, IGFX_VALLEYVIEW) || \
GFX_IS_PRODUCT(p, IGFX_CHERRYVIEW) || \
GFX_IS_PRODUCT(p, IGFX_BROXTON) )
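
A hedged usage sketch of the new Gen12 checks follows; the function name is an assumption, and GFX_GET_CURRENT_RENDERCORE is assumed to accept the PLATFORM structure by value, as the macros above imply:

// Illustration only: branch per render core using the macros above.
static void SelectCompressionPath(const PLATFORM platform)
{
    if (GFX_IS_GEN_12_OR_LATER(platform))
    {
        // Gen12+/TGL_LP path, e.g. Aux-Table (E2E) compression.
    }
    else if (GFX_IS_GEN_9_OR_LATER(platform))
    {
        // Legacy Gen9-Gen11 path.
    }
}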
@ -1144,6 +1159,18 @@ typedef enum __NATIVEGTTYPE
#define IICL_LP_1x4x8_LOW_MEDIA_ULT_DEVICE_F0_ID 0x8A56
#define IICL_LP_1x4x8_LOW_MEDIA_ULX_DEVICE_F0_ID 0x8A58
//TGL LP
#define IGEN12LP_GT1_MOB_DEVICE_F0_ID 0xFF20
#define ITGL_LP_1x6x16_UNKNOWN_SKU_F0_ID_5 0x9A49 // Remove this once newer enums are merged in OpenCL. Added this to avoid build failure with Linux/OpenCL.
#define ITGL_LP_1x6x16_ULT_15W_DEVICE_F0_ID 0x9A49 // Mobile - U42 - 15W
#define ITGL_LP_1x6x16_ULX_5_2W_DEVICE_F0_ID 0x9A40 // Mobile - Y42 - 5.2W
#define ITGL_LP_1x6x16_ULT_12W_DEVICE_F0_ID 0x9A59 // Mobile - U42 - 12W
#define ITGL_LP_1x2x16_HALO_45W_DEVICE_F0_ID 0x9A60 // Halo - H81 - 45W
#define ITGL_LP_1x2x16_DESK_65W_DEVICE_F0_ID 0x9A68 // Desktop - S81 - 35W/65W/95W
#define ITGL_LP_1x2x16_HALO_WS_45W_DEVICE_F0_ID 0x9A70 // Mobile WS - H81 - 45W
#define ITGL_LP_1x2x16_DESK_WS_65W_DEVICE_F0_ID 0x9A78 // Desktop WS - S81 - 35W/65W/95W
#define ITGL_LP_GT0_ULT_DEVICE_F0_ID 0x9A7F // GT0 - No GFX, Display Only
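
A hypothetical classification helper, shown only to illustrate how the IDs above group into one SKU family (0x9A49 is listed once because two of the defines alias the same value):

// Illustration only: true for any TGL LP graphics device ID defined above.
static bool IsTglLpGfxDevice(unsigned int deviceId)
{
    switch (deviceId)
    {
    case IGEN12LP_GT1_MOB_DEVICE_F0_ID:
    case ITGL_LP_1x6x16_ULT_15W_DEVICE_F0_ID:      // same value as ITGL_LP_1x6x16_UNKNOWN_SKU_F0_ID_5
    case ITGL_LP_1x6x16_ULX_5_2W_DEVICE_F0_ID:
    case ITGL_LP_1x6x16_ULT_12W_DEVICE_F0_ID:
    case ITGL_LP_1x2x16_HALO_45W_DEVICE_F0_ID:
    case ITGL_LP_1x2x16_DESK_65W_DEVICE_F0_ID:
    case ITGL_LP_1x2x16_HALO_WS_45W_DEVICE_F0_ID:
    case ITGL_LP_1x2x16_DESK_WS_65W_DEVICE_F0_ID:
    case ITGL_LP_GT0_ULT_DEVICE_F0_ID:
        return true;
    default:
        return false;
    }
}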
//LKF
#define ILKF_1x8x8_DESK_DEVICE_F0_ID 0x9840
#define ILKF_GT0_DESK_DEVICE_A0_ID 0x9850
@ -1193,6 +1220,39 @@ typedef enum __NATIVEGTTYPE
// LKF-PCH Device IDs
#define PCH_LKF_UNFUSED_SKU_ID 0x9880
#define PCH_LKF_SUPER_SKU_ID 0x9881
// TGL_LP PCH Device ID range 0xA080-0xA09F
#define PCH_TGL_LP_UNKNOWN_SKU_ID_1 0xA080
#define PCH_TGL_LP_UNKNOWN_SKU_ID_2 0xA081
#define PCH_TGL_LP_UNKNOWN_SKU_ID_3 0xA082
#define PCH_TGL_LP_UNKNOWN_SKU_ID_4 0xA083
#define PCH_TGL_LP_UNKNOWN_SKU_ID_5 0xA084
#define PCH_TGL_LP_UNKNOWN_SKU_ID_6 0xA085
#define PCH_TGL_LP_UNKNOWN_SKU_ID_7 0xA086
#define PCH_TGL_LP_UNKNOWN_SKU_ID_8 0xA087
#define PCH_TGL_LP_UNKNOWN_SKU_ID_9 0xA088
#define PCH_TGL_LP_UNKNOWN_SKU_ID_10 0xA089
#define PCH_TGL_LP_UNKNOWN_SKU_ID_11 0xA08A
#define PCH_TGL_LP_UNKNOWN_SKU_ID_12 0xA08B
#define PCH_TGL_LP_UNKNOWN_SKU_ID_13 0xA08C
#define PCH_TGL_LP_UNKNOWN_SKU_ID_14 0xA08D
#define PCH_TGL_LP_UNKNOWN_SKU_ID_15 0xA08E
#define PCH_TGL_LP_UNKNOWN_SKU_ID_16 0xA08F
#define PCH_TGL_LP_UNKNOWN_SKU_ID_17 0xA090
#define PCH_TGL_LP_UNKNOWN_SKU_ID_18 0xA091
#define PCH_TGL_LP_UNKNOWN_SKU_ID_19 0xA092
#define PCH_TGL_LP_UNKNOWN_SKU_ID_20 0xA093
#define PCH_TGL_LP_UNKNOWN_SKU_ID_21 0xA094
#define PCH_TGL_LP_UNKNOWN_SKU_ID_22 0xA095
#define PCH_TGL_LP_UNKNOWN_SKU_ID_23 0xA096
#define PCH_TGL_LP_UNKNOWN_SKU_ID_24 0xA097
#define PCH_TGL_LP_UNKNOWN_SKU_ID_25 0xA098
#define PCH_TGL_LP_UNKNOWN_SKU_ID_26 0xA099
#define PCH_TGL_LP_UNKNOWN_SKU_ID_27 0xA09A
#define PCH_TGL_LP_UNKNOWN_SKU_ID_28 0xA09B
#define PCH_TGL_LP_UNKNOWN_SKU_ID_29 0xA09C
#define PCH_TGL_LP_UNKNOWN_SKU_ID_30 0xA09D
#define PCH_TGL_LP_UNKNOWN_SKU_ID_31 0xA09E
#define PCH_TGL_LP_UNKNOWN_SKU_ID_32 0xA09F
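
Since the block above spans the full contiguous range 0xA080-0xA09F, a hypothetical range check (illustrative helper name) is simpler than 32 equality compares:

// Illustration only: detect a TGL LP PCH by the documented ID range.
static bool IsTglLpPchDevice(unsigned int pchDeviceId)
{
    return (pchDeviceId >= PCH_TGL_LP_UNKNOWN_SKU_ID_1) &&  // 0xA080
           (pchDeviceId <= PCH_TGL_LP_UNKNOWN_SKU_ID_32);   // 0xA09F
}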
//define CML LP PCH Device Ids
#define PCH_CMP_LP_DEV_P1_ID 0x0280

View File

@ -66,6 +66,8 @@ typedef struct _SKU_FEATURE_TABLE
unsigned int FtrVERing : 1; // Separate Ring for VideoEnhancement commands
unsigned int FtrVcs2 : 1; // Second VCS engine supported on Gen8 to Gen10 (in some configurations);
unsigned int FtrLCIA : 1; // Indicates Atom (Low Cost Intel Architecture)
unsigned int FtrCCSRing : 1; // To indicate if CCS hardware ring support is present.
unsigned int FtrCCSNode : 1; // To indicate if CCS Node support is present.
unsigned int FtrTileY : 1; // Identifies Legacy tiles TileY/Yf/Ys on the platform
};
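
A minimal sketch of how a client might consume the new CCS bits; the helper name is an assumption, and pSkuTable is assumed to point at a populated SKU_FEATURE_TABLE:

// Illustration only: CCS submission requires both the HW ring and the node.
static bool CanSubmitToCcs(const SKU_FEATURE_TABLE *pSkuTable)
{
    return pSkuTable->FtrCCSRing && pSkuTable->FtrCCSNode;
}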
@ -75,6 +77,7 @@ typedef struct _SKU_FEATURE_TABLE
unsigned int FtrPPGTT : 1; // Per-Process GTT
unsigned int FtrIA32eGfxPTEs : 1; // GTT/PPGTT's use 64-bit IA-32e PTE format.
unsigned int FtrMemTypeMocsDeferPAT : 1; // Pre-Gen12 MOCS can defer to PAT, e.g. eLLC Target Cache for MOCS
unsigned int FtrPml4Support : 1; // PML4-based gfx page tables are supported (in addition to PD-based tables).
unsigned int FtrSVM : 1; // Shared Virtual Memory (i.e. support for SVM buffers which can be accessed by both the CPU and GPU at numerically equivalent addresses.)
unsigned int FtrTileMappedResource : 1; // Tiled Resource support aka Sparse Textures.
@ -82,17 +85,19 @@ typedef struct _SKU_FEATURE_TABLE
unsigned int FtrUserModeTranslationTable : 1; // User mode managed Translation Table support for Tiled Resources.
unsigned int FtrNullPages : 1; // Support for PTE-based Null pages for Sparse/Tiled Resources).
unsigned int FtrEDram : 1; // embedded DRAM enable
unsigned int FtrLLCBypass : 1; // Partial tunneling of UC memory traffic via CCF (LLC Bypass)
unsigned int FtrCrystalwell : 1; // Crystalwell Sku
unsigned int FtrCentralCachePolicy : 1; // Centralized Cache Policy
unsigned int FtrWddm2GpuMmu : 1; // WDDMv2 GpuMmu Model (Set in platform SKU files, but disabled by GMM as appropriate for given system.)
unsigned int FtrWddm2Svm : 1; // WDDMv2 SVM Model (Set in platform SKU files, but disabled by GMM as appropriate for given system.)
unsigned int FtrStandardMipTailFormat : 1; // Dx Standard MipTail Format for TileYf/Ys
unsigned int FtrWddm2_1_64kbPages : 1; // WDDMv2.1 64KB page support
unsigned int FtrE2ECompression : 1; // E2E Compression ie Aux Table support
unsigned int FtrLinearCCS : 1; // Linear Aux surface is supported
unsigned int FtrFrameBufferLLC : 1; // Displayable Frame buffers cached in LLC
unsigned int FtrDriverFLR : 1; // Enable Function Level Reset (Gen11+)
unsigned int FtrLocalMemory : 1;
unsigned int FtrLLCBypass : 1; // Partial tunneling of UC memory traffic via CCF (LLC Bypass)
};
};
struct //_sku_3d
@ -362,6 +367,12 @@ typedef struct _WA_TABLE
WA_BUG_TYPE_FUNCTIONAL,
WA_BUG_PERF_IMPACT, WA_COMPONENT_GMM)
WA_DECLARE(
WaMemTypeIsMaxOfPatAndMocs,
"WA to set PAT.MT = UC. Since TGLLP uses MAX function to resolve PAT vs MOCS MemType So unless PTE.PAT says UC, MOCS won't be able to set UC!",
WA_BUG_TYPE_FUNCTIONAL,
WA_BUG_PERF_IMPACT, WA_COMPONENT_GMM)
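
To illustrate the rule this WA works around, a sketch of the "max wins" resolution; the enum encoding is an assumption chosen only to express the cacheability ordering, not the HW encoding:

// Illustration only: higher value = more cacheable. Because TGLLP resolves
// PAT vs MOCS as a max, a WB PAT entry masks a UC MOCS request; forcing
// PAT.MT = UC (the WA) lets MOCS still select UC where needed.
enum MemType { MT_UC = 0, MT_WC = 1, MT_WT = 2, MT_WB = 3 };

static MemType ResolveEffectiveMemType(MemType patMt, MemType mocsMt)
{
    return (patMt > mocsMt) ? patMt : mocsMt;
}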
WA_DECLARE(
WaGttPat0GttWbOverOsIommuEllcOnly,
"WA to set PAT0 to full cacheable (LLC+eLLC) for GTT access over eLLC only usage for OS based SVM",

View File

@ -104,6 +104,7 @@ typedef enum PERFTAG_CLASS_ENUM
#define PERFTAG_UNKNOWN_BITS(PerfTag) ( PerfTag & (ULONG)0xFFFF0000 ) // Bits[16,31] Usage component specific
#define PERFTAG_FRAMEID(PerfTag) ( PerfTag & (ULONG)0x00FF0000 ) // Bits[16,23] Media Specific - frame id
#define PERFTAG_BUFFERID(PerfTag) ( PerfTag & (ULONG)0x0F000000 ) // Bits[24,27] Media Specific - buffer id
#define PERFTAG_BATCHBUFFERID(PerfTag) ( PerfTag & (ULONG)0xF0000000 ) // Bits[28,31] Media Specific - batch buffer id
#define PERFTAG_FRAMEID_SHIFT 16
#define PERFTAG_BUFFERID_SHIFT 24
#define PERFTAG_BATCHBUFFERID_SHIFT 28
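
For illustration, a hypothetical builder showing how the shift values compose with the field layout documented above:

// Illustration only: pack media-specific fields into the upper perf-tag bits.
static unsigned int BuildMediaPerfTag(unsigned int base,
                                      unsigned int frameId,        // bits [16,23]
                                      unsigned int bufferId,       // bits [24,27]
                                      unsigned int batchBufferId)  // bits [28,31]
{
    return base |
           (frameId       << PERFTAG_FRAMEID_SHIFT) |
           (bufferId      << PERFTAG_BUFFERID_SHIFT) |
           (batchBufferId << PERFTAG_BATCHBUFFERID_SHIFT);
}
// Decoding reverses it, e.g. frameId = PERFTAG_FRAMEID(tag) >> PERFTAG_FRAMEID_SHIFT;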
@ -328,6 +329,13 @@ typedef enum _VPHAL_PERFTAG
VPHAL_FDFB_FB_EYE_SAHDOW,
VPHAL_FDFB_FB_EYE_COLOR,
// SR
VPHAL_SR_CONV_1X1_32_5,
VPHAL_SR_CONV_1X1_5_32,
VPHAL_SR_CONV_3X3,
VPHAL_SR_SUBPIXEL_CONV_2X2,
VPHAL_SR_CONV_5X5_Y8,
// ADD TAGS FOR NEW ADVPROC KRNS HERE
VPHAL_PERFTAG_MAX

View File

@ -54,7 +54,7 @@ typedef enum GPUNODE_REC
GPUNODE_BLT = 2, // available on GT
GPUNODE_VE = 3, // available on HSW+ (VideoEnhancement), virtual node
GPUNODE_VCS2 = 4, // available on BDW/SKL/KBL GT3+ and CNL,
GPUNODE_RESERVED = 5, //
GPUNODE_CCS0 = 5, //
GPUNODE_REAL_MAX, // all nodes beyond this are virtual nodes - they don't have an actual GPU engine
GPUNODE_PICS = 6, // available on CNL+. Real node but only for KMD internal use. Hence kept after GPUNODE_REAL_MAX (Note: We need to keep it before overlay node)
GPUNODE_OVERLAY = 7,
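
A sketch tying the renamed node to the new SKU bit; GPUNODE_3D is assumed to be the render node declared earlier in this enum, and the helper is illustrative only:

// Illustration only: route compute work to CCS0 when the SKU exposes it.
static int SelectComputeNode(const SKU_FEATURE_TABLE *pSkuTable)
{
    return pSkuTable->FtrCCSNode ? GPUNODE_CCS0 : GPUNODE_3D;
}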