UefiCpuPkg/PiSmmCpuDxeSmm: Refactor code to create default Page Table

For MM:
Since all accessible NON-MMRAM memory and attribute shall be in
ResourceDescriptor HOBs for MM, the page table for MM can be finalized
and created in the default Page.

For SMM:
There are still 2 steps required to finalize the default Page Table:
1. Create default Page
2. Update the page table in the first SMI when the SMM Ready To Lock
   event happens

This patch refactors the GenSmmPageTable() function to create the
default Page Table for both SMM and MM:
1. Create NonMmram MemoryRegion
2. Gen NonMmram MemoryRegion PageTable
3. Gen MMRAM Range PageTable
4. Consider PcdCpuSmmStackGuard & PcdNullPointerDetectionPropertyMask
   cases.

Meanwhile, mXdSupported needs to be initialized before GenSmmPageTable
since it's required by the GenSmmPageTable function. So, move the
mXdSupported init from CheckFeatureSupported to the common EntryPoint
function.

Signed-off-by: Jiaxin Wu <jiaxin.wu@intel.com>
Cc: Ray Ni <ray.ni@intel.com>
Cc: Rahul Kumar <rahul1.kumar@intel.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: Star Zeng <star.zeng@intel.com>
Cc: Dun Tan <dun.tan@intel.com>
Cc: Hongbin1 Zhang <hongbin1.zhang@intel.com>
Cc: Wei6 Xu <wei6.xu@intel.com>
Cc: Yuanhao Xie <yuanhao.xie@intel.com>
This commit is contained in:
Jiaxin Wu 2024-06-26 14:49:01 +08:00 committed by mergify[bot]
parent 14cb36685b
commit 1c19ccd510
7 changed files with 287 additions and 72 deletions

View File

@ -601,3 +601,70 @@ CreateExtendedProtectionRange (
return;
}
/**
  Create the Non-Mmram Memory Region.

  Build MemoryRegion to cover [0, 2^PhysicalAddressBits) by excluding all Smram range.
  The memory attribute is all-allowed (read/write/executable).

  The caller is responsible for freeing MemoryRegion via FreePool().

  @param[in]  PhysicalAddressBits  The bits of physical address to map.
  @param[out] MemoryRegion         Returned Non-Mmram Memory regions.
  @param[out] MemoryRegionCount    A pointer to the number of Memory regions.
**/
VOID
CreateNonMmramMemMap (
  IN  UINT8                 PhysicalAddressBits,
  OUT MM_CPU_MEMORY_REGION  **MemoryRegion,
  OUT UINTN                 *MemoryRegionCount
  )
{
  MM_CPU_MEMORY_REGION  *Regions;
  UINT64                LimitAddress;
  UINTN                 RangeIndex;
  UINT64                GapStart;
  UINT64                SmramBase;
  UINT64                SmramLength;

  ASSERT (MemoryRegion != NULL && MemoryRegionCount != NULL);

  *MemoryRegion      = NULL;
  *MemoryRegionCount = 0;

  //
  // Top of the address space to cover: [0, 2^PhysicalAddressBits).
  //
  LimitAddress = LShiftU64 (1, PhysicalAddressBits);

  //
  // At most one gap below each SMRAM range plus the tail region above the
  // highest SMRAM range: mSmmCpuSmramRangeCount + 1 entries. Entries that
  // correspond to adjacent SMRAM ranges stay zero-length (pool is zeroed).
  //
  // NOTE(review): the gap walk assumes mSmmCpuSmramRanges is sorted by
  // CpuStart and non-overlapping -- confirm with the SMRAM range producer.
  //
  Regions = (MM_CPU_MEMORY_REGION *)AllocateZeroPool (sizeof (MM_CPU_MEMORY_REGION) * (mSmmCpuSmramRangeCount + 1));
  ASSERT (Regions != NULL);
  *MemoryRegionCount = mSmmCpuSmramRangeCount + 1;
  *MemoryRegion      = Regions;

  //
  // Record the non-SMRAM gap (if any) below each SMRAM range.
  //
  GapStart = 0;
  for (RangeIndex = 0; RangeIndex < mSmmCpuSmramRangeCount; RangeIndex++) {
    SmramBase   = mSmmCpuSmramRanges[RangeIndex].CpuStart;
    SmramLength = mSmmCpuSmramRanges[RangeIndex].PhysicalSize;
    ASSERT (LimitAddress > SmramBase + SmramLength);
    if (SmramBase > GapStart) {
      Regions[RangeIndex].Base      = GapStart;
      Regions[RangeIndex].Length    = SmramBase - GapStart;
      Regions[RangeIndex].Attribute = 0;
    }

    GapStart = SmramBase + SmramLength;
  }

  //
  // Set the last remaining range
  //
  if (GapStart < LimitAddress) {
    Regions[RangeIndex].Base   = GapStart;
    Regions[RangeIndex].Length = LimitAddress - GapStart;
  }
}

View File

@ -156,3 +156,23 @@ CreateExtendedProtectionRange (
{
BuildMemoryMapFromResDescHobs (MemoryRegion, MemoryRegionCount);
}
/**
  Create the Non-Mmram Memory Region within the ResourceDescriptor HOBs
  without Logging attribute.

  The caller is responsible for freeing MemoryRegion via FreePool().

  NOTE(review): PhysicalAddressBits is unused in this implementation; the
  returned regions come entirely from BuildMemoryMapFromResDescHobs().
  Presumably the parameter exists only to share a prototype with the SMM
  flavor of this function -- confirm against the header declaration.

  @param[in] PhysicalAddressBits The bits of physical address to map.
  @param[out] MemoryRegion Returned Non-Mmram Memory regions.
  @param[out] MemoryRegionCount A pointer to the number of Memory regions.
**/
VOID
CreateNonMmramMemMap (
IN UINT8 PhysicalAddressBits,
OUT MM_CPU_MEMORY_REGION **MemoryRegion,
OUT UINTN *MemoryRegionCount
)
{
//
// Delegate entirely to the shared Resource Descriptor HOB walker.
//
BuildMemoryMapFromResDescHobs (MemoryRegion, MemoryRegionCount);
}

View File

@ -728,19 +728,20 @@ PiSmmCpuEntryCommon (
VOID
)
{
EFI_STATUS Status;
UINTN Index;
UINTN TileCodeSize;
UINTN TileDataSize;
UINTN TileSize;
UINT8 *Stacks;
UINT32 RegEax;
UINT32 RegEbx;
UINT32 RegEcx;
UINT32 RegEdx;
UINTN FamilyId;
UINTN ModelId;
UINT32 Cr3;
EFI_STATUS Status;
UINTN Index;
UINTN TileCodeSize;
UINTN TileDataSize;
UINTN TileSize;
UINT8 *Stacks;
UINT32 RegEax;
UINT32 RegEbx;
UINT32 RegEcx;
UINT32 RegEdx;
CPUID_EXTENDED_CPU_SIG_EDX ExtendedRegEdx;
UINTN FamilyId;
UINTN ModelId;
UINT32 Cr3;
PERF_FUNCTION_BEGIN ();
@ -928,6 +929,36 @@ PiSmmCpuEntryCommon (
PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
}
//
// Check XD supported or not.
//
RegEax = 0;
ExtendedRegEdx.Uint32 = 0;
AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
if (RegEax <= CPUID_EXTENDED_FUNCTION) {
//
// Extended CPUID functions are not supported on this processor.
//
mXdSupported = FALSE;
PatchInstructionX86 (gPatchXdSupported, mXdSupported, 1);
}
AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &ExtendedRegEdx.Uint32);
if (ExtendedRegEdx.Bits.NX == 0) {
//
// Execute Disable Bit feature is not supported on this processor.
//
mXdSupported = FALSE;
PatchInstructionX86 (gPatchXdSupported, mXdSupported, 1);
}
if (StandardSignatureIsAuthenticAMD ()) {
//
// AMD processors do not support MSR_IA32_MISC_ENABLE
//
PatchInstructionX86 (gPatchMsrIa32MiscEnableSupported, FALSE, 1);
}
//
// Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU
// specific context start starts at SMBASE + SMM_PSD_OFFSET, and the SMI entry point.

View File

@ -959,7 +959,7 @@ IsSmmCommBufferForbiddenAddress (
IN UINT64 Address
);
/*
/**
Build extended protection MemoryRegion.
The caller is responsible for freeing MemoryRegion via FreePool().
@ -967,13 +967,30 @@ IsSmmCommBufferForbiddenAddress (
@param[out] MemoryRegion Returned Non-Mmram Memory regions.
@param[out] MemoryRegionCount A pointer to the number of Memory regions.
*/
**/
VOID
CreateExtendedProtectionRange (
OUT MM_CPU_MEMORY_REGION **MemoryRegion,
OUT UINTN *MemoryRegionCount
);
/**
Create the Non-Mmram Memory Region.
The caller is responsible for freeing MemoryRegion via FreePool().
@param[in] PhysicalAddressBits The bits of physical address to map.
@param[out] MemoryRegion Returned Non-Mmram Memory regions.
@param[out] MemoryRegionCount A pointer to the number of Memory regions.
**/
VOID
CreateNonMmramMemMap (
IN UINT8 PhysicalAddressBits,
OUT MM_CPU_MEMORY_REGION **MemoryRegion,
OUT UINTN *MemoryRegionCount
);
/**
This function caches the UEFI memory map information.
**/

View File

@ -1169,6 +1169,64 @@ EdkiiSmmClearMemoryAttributes (
return SmmClearMemoryAttributes (BaseAddress, Length, Attributes);
}
/**
  Create page table based on input PagingMode, LinearAddress and Length.

  @param[in, out] PageTable      The pointer to the page table.
  @param[in]      PagingMode     The paging mode.
  @param[in]      LinearAddress  The start of the linear address range.
  @param[in]      Length         The length of the linear address range.
  @param[in]      MapAttribute   The MapAttribute of the linear address range.
  @param[in]      MapMask        The MapMask used for attribute. The
                                 corresponding field in Attribute is ignored
                                 if that in MapMask is 0.
**/
VOID
GenPageTable (
  IN OUT UINTN               *PageTable,
  IN     PAGING_MODE         PagingMode,
  IN     UINT64              LinearAddress,
  IN     UINT64              Length,
  IN     IA32_MAP_ATTRIBUTE  MapAttribute,
  IN     IA32_MAP_ATTRIBUTE  MapMask
  )
{
  RETURN_STATUS  MapStatus;
  VOID           *Buffer;
  UINTN          BufferSize;

  //
  // First pass with a NULL buffer: PageTableMap reports the amount of
  // page-table memory the mapping needs via BufferSize and returns
  // RETURN_BUFFER_TOO_SMALL.
  //
  BufferSize = 0;
  MapStatus  = PageTableMap (
                 PageTable,
                 PagingMode,
                 NULL,
                 &BufferSize,
                 LinearAddress,
                 Length,
                 &MapAttribute,
                 &MapMask,
                 NULL
                 );
  if (MapStatus == RETURN_BUFFER_TOO_SMALL) {
    //
    // Second pass with a real buffer actually builds the mapping.
    //
    Buffer = AllocatePageTableMemory (EFI_SIZE_TO_PAGES (BufferSize));
    ASSERT (Buffer != NULL);
    MapStatus = PageTableMap (
                  PageTable,
                  PagingMode,
                  Buffer,
                  &BufferSize,
                  LinearAddress,
                  Length,
                  &MapAttribute,
                  &MapMask,
                  NULL
                  );
  }

  //
  // The mapping must succeed and consume exactly the reported buffer.
  //
  ASSERT (MapStatus == RETURN_SUCCESS);
  ASSERT (BufferSize == 0);
}
/**
Create page table based on input PagingMode and PhysicalAddressBits in smm.
@ -1184,35 +1242,85 @@ GenSmmPageTable (
IN UINT8 PhysicalAddressBits
)
{
UINTN PageTableBufferSize;
UINTN PageTable;
VOID *PageTableBuffer;
IA32_MAP_ATTRIBUTE MapAttribute;
IA32_MAP_ATTRIBUTE MapMask;
RETURN_STATUS Status;
UINTN GuardPage;
UINTN Index;
UINT64 Length;
UINTN PageTable;
UINTN Index;
MM_CPU_MEMORY_REGION *MemoryRegion;
UINTN MemoryRegionCount;
IA32_MAP_ATTRIBUTE MapAttribute;
IA32_MAP_ATTRIBUTE MapMask;
RETURN_STATUS Status;
UINTN GuardPage;
Length = LShiftU64 (1, PhysicalAddressBits);
PageTable = 0;
PageTableBufferSize = 0;
MapMask.Uint64 = MAX_UINT64;
MapAttribute.Uint64 = mAddressEncMask;
MapAttribute.Bits.Present = 1;
MapAttribute.Bits.ReadWrite = 1;
MapAttribute.Bits.UserSupervisor = 1;
MapAttribute.Bits.Accessed = 1;
MapAttribute.Bits.Dirty = 1;
PageTable = 0;
MemoryRegion = NULL;
MemoryRegionCount = 0;
MapMask.Uint64 = MAX_UINT64;
Status = PageTableMap (&PageTable, PagingMode, NULL, &PageTableBufferSize, 0, Length, &MapAttribute, &MapMask, NULL);
ASSERT (Status == RETURN_BUFFER_TOO_SMALL);
DEBUG ((DEBUG_INFO, "GenSMMPageTable: 0x%x bytes needed for initial SMM page table\n", PageTableBufferSize));
PageTableBuffer = AllocatePageTableMemory (EFI_SIZE_TO_PAGES (PageTableBufferSize));
ASSERT (PageTableBuffer != NULL);
Status = PageTableMap (&PageTable, PagingMode, PageTableBuffer, &PageTableBufferSize, 0, Length, &MapAttribute, &MapMask, NULL);
ASSERT (Status == RETURN_SUCCESS);
ASSERT (PageTableBufferSize == 0);
//
// 1. Create NonMmram MemoryRegion
//
CreateNonMmramMemMap (PhysicalAddressBits, &MemoryRegion, &MemoryRegionCount);
ASSERT (MemoryRegion != NULL && MemoryRegionCount != 0);
//
// 2. Gen NonMmram MemoryRegion PageTable
//
for (Index = 0; Index < MemoryRegionCount; Index++) {
ASSERT (MemoryRegion[Index].Base % SIZE_4KB == 0);
ASSERT (MemoryRegion[Index].Length % EFI_PAGE_SIZE == 0);
//
// Set the MapAttribute
//
MapAttribute.Uint64 = mAddressEncMask|MemoryRegion[Index].Base;
MapAttribute.Bits.Present = 1;
MapAttribute.Bits.ReadWrite = 1;
MapAttribute.Bits.UserSupervisor = 1;
MapAttribute.Bits.Accessed = 1;
MapAttribute.Bits.Dirty = 1;
//
// Update the MapAttribute according MemoryRegion[Index].Attribute
//
if ((MemoryRegion[Index].Attribute & EFI_MEMORY_RO) != 0) {
MapAttribute.Bits.ReadWrite = 0;
}
if ((MemoryRegion[Index].Attribute & EFI_MEMORY_XP) != 0) {
if (mXdSupported) {
MapAttribute.Bits.Nx = 1;
}
}
GenPageTable (&PageTable, PagingMode, MemoryRegion[Index].Base, (UINTN)MemoryRegion[Index].Length, MapAttribute, MapMask);
}
//
// Free the MemoryRegion after usage
//
if (MemoryRegion != NULL) {
FreePool (MemoryRegion);
}
//
// 3. Gen MMRAM Range PageTable
//
for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
ASSERT (mSmmCpuSmramRanges[Index].CpuStart % SIZE_4KB == 0);
ASSERT (mSmmCpuSmramRanges[Index].PhysicalSize % EFI_PAGE_SIZE == 0);
//
// Set the MapAttribute
//
MapAttribute.Uint64 = mAddressEncMask|mSmmCpuSmramRanges[Index].CpuStart;
MapAttribute.Bits.Present = 1;
MapAttribute.Bits.ReadWrite = 1;
MapAttribute.Bits.UserSupervisor = 1;
MapAttribute.Bits.Accessed = 1;
MapAttribute.Bits.Dirty = 1;
GenPageTable (&PageTable, PagingMode, mSmmCpuSmramRanges[Index].CpuStart, mSmmCpuSmramRanges[Index].PhysicalSize, MapAttribute, MapMask);
}
if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
//

View File

@ -884,33 +884,6 @@ CheckFeatureSupported (
}
}
if (mXdSupported) {
AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
if (RegEax <= CPUID_EXTENDED_FUNCTION) {
//
// Extended CPUID functions are not supported on this processor.
//
mXdSupported = FALSE;
PatchInstructionX86 (gPatchXdSupported, mXdSupported, 1);
}
AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
if ((RegEdx & CPUID1_EDX_XD_SUPPORT) == 0) {
//
// Execute Disable Bit feature is not supported on this processor.
//
mXdSupported = FALSE;
PatchInstructionX86 (gPatchXdSupported, mXdSupported, 1);
}
if (StandardSignatureIsAuthenticAMD ()) {
//
// AMD processors do not support MSR_IA32_MISC_ENABLE
//
PatchInstructionX86 (gPatchMsrIa32MiscEnableSupported, FALSE, 1);
}
}
if (mBtsSupported) {
AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx);
if ((RegEdx & CPUID1_EDX_BTS_AVAILABLE) != 0) {

View File

@ -39,9 +39,8 @@ SPDX-License-Identifier: BSD-2-Clause-Patent
//
// CPU generic definition
//
#define CPUID1_EDX_XD_SUPPORT 0x100000
#define MSR_EFER 0xc0000080
#define MSR_EFER_XD 0x800
#define MSR_EFER 0xc0000080
#define MSR_EFER_XD 0x800
#define CPUID1_EDX_BTS_AVAILABLE 0x200000