sync comments.

git-svn-id: https://edk2.svn.sourceforge.net/svnroot/edk2/trunk/edk2@6274 6f19259b-4bc3-4df7-8a09-765794883524
vanjeff 2008-10-28 09:51:44 +00:00
parent 85ea851e6a
commit 57246fe073
19 changed files with 77 additions and 77 deletions

@@ -38,15 +38,15 @@ ASM_PFX(InternalX86DisablePaging32):
    movl    8(%esp), %ecx
    movl    12(%esp), %edx
    pushfl
    pop     %edi                        # save EFLAGS to edi
    cli
    movl    %cr0, %eax
    btrl    $31, %eax
    movl    16(%esp), %esp
    movl    %eax, %cr0
    push    %edi
    popfl                               # restore EFLAGS from edi
    push    %edx
    push    %ecx
    call    *%ebx
    jmp     .                           # EntryPoint() should not return
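
For context, a hedged sketch of the C-visible contract of the routine above. The typedef follows the shape of MdePkg's SWITCH_STACK_ENTRY_POINT but is restated locally here; treat the names and parameter order as an assumption, not the authoritative declaration.

/* Assumed call contract, restated locally (not taken verbatim from BaseLib). */
typedef void (*SWITCH_STACK_ENTRY_POINT)(void *Context1, void *Context2);

/* Clears CR0.PG with interrupts masked, switches to NewStack, then calls
   EntryPoint(Context1, Context2); EntryPoint must never return, which is
   why the routine ends in "jmp .". */
void
InternalX86DisablePaging32 (
  SWITCH_STACK_ENTRY_POINT  EntryPoint,
  void                      *Context1,
  void                      *Context2,
  void                      *NewStack
  );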

@@ -34,8 +34,8 @@ ASM_PFX(InternalMathDivU64x32):
    movl    12(%esp), %ecx
    xorl    %edx, %edx
    divl    %ecx
    push    %eax                        # save quotient on stack
    movl    8(%esp), %eax
    divl    %ecx
    pop     %edx                        # restore high-order dword of the quotient
    ret
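
For reference, a minimal C sketch of the two-divide scheme above: divide the high dword first, then feed its remainder into a second divide of the low dword; each step maps to one divl. Assumes Divisor != 0; the helper name is illustrative.

#include <stdint.h>

static uint64_t
div_u64x32 (uint64_t Dividend, uint32_t Divisor)
{
  uint32_t  hi = (uint32_t)(Dividend >> 32);
  uint32_t  lo = (uint32_t)Dividend;
  uint32_t  q_hi = hi / Divisor;                     /* first divl */
  /* second divl: (remainder:lo) / Divisor always fits in 32 bits */
  uint32_t  q_lo = (uint32_t)((((uint64_t)(hi % Divisor) << 32) | lo) / Divisor);
  return ((uint64_t)q_hi << 32) | q_lo;
}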

@@ -31,16 +31,16 @@
#  );
#------------------------------------------------------------------------------
ASM_PFX(InternalMathDivRemU64x32):
    movl    12(%esp), %ecx              # ecx <- divisor
    movl    8(%esp), %eax               # eax <- dividend[32..63]
    xorl    %edx, %edx
    divl    %ecx                        # eax <- quotient[32..63], edx <- remainder
    push    %eax
    movl    8(%esp), %eax               # eax <- dividend[0..31]
    divl    %ecx                        # eax <- quotient[0..31]
    movl    20(%esp), %ecx              # ecx <- Remainder
    jecxz   L1                          # abandon remainder if Remainder == NULL
    movl    %edx, (%ecx)
L1:
    pop     %edx                        # edx <- quotient[32..63]
    ret
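
The same scheme with the optional remainder, sketched in C; the NULL test corresponds to the jecxz above. The helper name is illustrative.

#include <stdint.h>

static uint64_t
div_rem_u64x32 (uint64_t Dividend, uint32_t Divisor, uint32_t *Remainder)
{
  uint32_t  hi = (uint32_t)(Dividend >> 32);
  uint32_t  lo = (uint32_t)Dividend;
  uint64_t  rest = ((uint64_t)(hi % Divisor) << 32) | lo;
  if (Remainder != NULL) {              /* abandon remainder if NULL, as above */
    *Remainder = (uint32_t)(rest % Divisor);
  }
  return ((uint64_t)(hi / Divisor) << 32) | (uint32_t)(rest / Divisor);
}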

@@ -32,13 +32,13 @@
#  );
#------------------------------------------------------------------------------
ASM_PFX(InternalMathDivRemU64x64):
    movl    16(%esp), %ecx              # ecx <- divisor[32..63]
    testl   %ecx, %ecx
    jnz     Hard                        # call _@DivRemU64x64 if Divisor > 2^32
    movl    20(%esp), %ecx
    jecxz   L1
    and     $0, 4(%ecx)                 # zero high dword of remainder
    movl    %ecx, 16(%esp)              # set up stack frame to match DivRemU64x32
L1:
    jmp     ASM_PFX(InternalMathDivRemU64x32)
Hard:
@@ -46,10 +46,10 @@ Hard:
    push    %esi
    push    %edi
    mov     20(%esp), %edx
    mov     16(%esp), %eax              # edx:eax <- dividend
    movl    %edx, %edi
    movl    %eax, %esi                  # edi:esi <- dividend
    mov     24(%esp), %ebx              # ecx:ebx <- divisor
L2:
    shrl    %edx
    rcrl    $1, %eax
@@ -57,32 +57,32 @@ L2:
    shrl    %ecx
    jnz     L2
    divl    %ebx
    movl    %eax, %ebx                  # ebx <- quotient
    movl    28(%esp), %ecx              # ecx <- high dword of divisor
    mull    24(%esp)                    # edx:eax <- quotient * divisor[0..31]
    imull   %ebx, %ecx                  # ecx <- quotient * divisor[32..63]
    addl    %ecx, %edx                  # edx <- (quotient * divisor)[32..63]
    mov     32(%esp), %ecx              # ecx <- addr for Remainder
    jc      TooLarge                    # product > 2^64
    cmpl    %edx, %edi                  # compare high 32 bits
    ja      Correct
    jb      TooLarge                    # product > dividend
    cmpl    %eax, %esi
    jae     Correct                     # product <= dividend
TooLarge:
    decl    %ebx                        # adjust quotient by -1
    jecxz   Return                      # return if Remainder == NULL
    sub     24(%esp), %eax
    sbb     28(%esp), %edx              # edx:eax <- (quotient - 1) * divisor
Correct:
    jecxz   Return
    subl    %eax, %esi
    sbbl    %edx, %edi                  # edi:esi <- remainder
    movl    %esi, (%ecx)
    movl    %edi, 4(%ecx)
Return:
    movl    %ebx, %eax                  # eax <- quotient
    xorl    %edx, %edx                  # quotient is 32 bits long
    pop     %edi
    pop     %esi
    pop     %ebx
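
A C sketch of the Hard path above, under the same assumption (the high dword of Divisor is non-zero): shift dividend and divisor right together until the divisor fits in 32 bits, divide once for an estimate, then correct the estimate downward at most once, exactly as the decl %ebx / TooLarge logic does. The helper name is illustrative.

#include <stdint.h>

static uint64_t
div_rem_u64x64 (uint64_t Dividend, uint64_t Divisor, uint64_t *Remainder)
{
  uint64_t  n = Dividend;
  uint64_t  d = Divisor;                /* assumed >= 2^32 here */
  uint64_t  q;

  while ((d >> 32) != 0) {              /* the shrl/rcrl loop */
    n >>= 1;
    d >>= 1;
  }
  q = n / (uint32_t)d;                  /* estimate; at most one too large */
  if (q != 0 &&
      (Divisor > UINT64_MAX / q ||      /* q * Divisor overflows: jc TooLarge */
       q * Divisor > Dividend)) {       /* product > dividend */
    q--;
  }
  if (Remainder != NULL) {              /* jecxz Return */
    *Remainder = Dividend - q * Divisor;
  }
  return q;
}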

@@ -38,14 +38,14 @@ ASM_PFX(InternalX86EnablePaging32):
    movl    8(%esp), %ecx
    movl    12(%esp), %edx
    pushfl
    pop     %edi                        # save flags in edi
    cli
    movl    %cr0, %eax
    btsl    $31, %eax
    movl    16(%esp), %esp
    movl    %eax, %cr0
    push    %edi
    popfl                               # restore flags
    push    %edx
    push    %ecx
    call    *%ebx

@@ -36,7 +36,7 @@
#------------------------------------------------------------------------------
ASM_PFX(InternalX86EnablePaging64):
    cli
    movl    $LongStart, (%esp)          # offset for far retf, seg is the 1st arg
    movl    %cr4, %eax
    orb     $0x20, %al
    movl    %eax, %cr4                  # enable PAE
@@ -45,11 +45,11 @@ ASM_PFX(InternalX86EnablePaging64):
    orb     $1, %ah                     # set LME
    wrmsr
    movl    %cr0, %eax
    btsl    $31, %eax                   # set PG
    movl    %eax, %cr0                  # enable paging
    lret                                # topmost 2 dwords hold the address
LongStart:                              # long mode starts here
    .byte   0x67, 0x48                  # 32-bit address size, 64-bit operand size
    movl    (%esp), %ebx                # mov rbx, [esp]
    .byte   0x67, 0x48
    movl    8(%esp), %ecx               # mov rcx, [esp + 8]
@@ -60,4 +60,4 @@ LongStart: # long mode starts here
    .byte   0x48
    addl    $-0x20, %esp                # add rsp, -20h
    call    *%ebx                       # call rbx
    jmp     .                           # no one should get here

@@ -31,6 +31,6 @@
#  );
#------------------------------------------------------------------------------
ASM_PFX(InternalX86FxRestore):
    movl    4(%esp), %eax               # Buffer must be 16-byte aligned
    fxrstor (%eax)
    ret

@@ -31,6 +31,6 @@
#  );
#------------------------------------------------------------------------------
ASM_PFX(InternalX86FxSave):
    movl    4(%esp), %eax               # Buffer must be 16-byte aligned
    fxsave  (%eax)
    ret

@@ -38,7 +38,7 @@ ASM_PFX(InternalMathLRotU64):
    shldl   %cl, %eax, %edx
    rorl    %cl, %ebx
    shldl   %cl, %ebx, %eax
    testb   $32, %cl                    # Count >= 32?
    cmovnz  %eax, %ecx
    cmovnz  %edx, %eax
    cmovnz  %ecx, %edx

@@ -33,7 +33,7 @@ ASM_PFX(InternalMathLShiftU64):
    movb    12(%esp), %cl
    xorl    %eax, %eax
    movl    4(%esp), %edx
    testb   $32, %cl                    # Count >= 32?
    cmovz   %edx, %eax
    cmovz   0x8(%esp), %edx
    shld    %cl, %eax, %edx
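
A branch-y C rendering of the cmov selection above (the asm itself is branchless): when Count >= 32 the low dword becomes the new high dword and the low result is zero; otherwise shld stitches the two dwords. The same pattern, mirrored, drives InternalMathRShiftU64 and the rotate routines below. Assumes 0 <= Count < 64; the helper name is illustrative.

#include <stdint.h>

static uint64_t
lshift_u64 (uint64_t Operand, unsigned int Count)
{
  uint32_t      lo = (uint32_t)Operand;
  uint32_t      hi = (uint32_t)(Operand >> 32);
  unsigned int  c  = Count & 31;        /* hardware masks cl the same way */
  uint32_t      new_lo, new_hi;

  if ((Count & 32) != 0) {              /* testb $32, %cl */
    new_hi = lo << c;
    new_lo = 0;
  } else {
    new_hi = (hi << c) | (c != 0 ? lo >> (32 - c) : 0);   /* shld */
    new_lo = lo << c;
  }
  return ((uint64_t)new_hi << 32) | new_lo;
}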

@@ -30,12 +30,12 @@
#  );
#------------------------------------------------------------------------------
ASM_PFX(InternalLongJump):
    pop     %eax                        # skip return address
    pop     %edx                        # edx <- JumpBuffer
    pop     %eax                        # eax <- Value
    movl    (%edx), %ebx
    movl    4(%edx), %esi
    movl    8(%edx), %edi
    movl    12(%edx), %ebp
    movl    16(%edx), %esp
    jmp     *20(%edx)                   # restore "eip"

@@ -36,5 +36,5 @@ ASM_PFX(AsmMonitor):
    movl    4(%esp), %eax
    movl    8(%esp), %ecx
    movl    12(%esp), %edx
    monitor %eax, %ecx, %edx            # monitor
    ret

@@ -35,7 +35,7 @@
ASM_PFX(InternalMathMultU64x32):
    movl    12(%esp), %ecx
    movl    %ecx, %eax
    imull   8(%esp), %ecx               # overflow not detectable
    mull    0x4(%esp)
    addl    %ecx, %edx
    ret
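
In C terms, the routine computes the low 64 bits of Multiplicand * Multiplier; the "overflow not detectable" comment applies because the high half of the cross product is simply discarded. A sketch (helper name illustrative):

#include <stdint.h>

static uint64_t
mult_u64x32 (uint64_t Multiplicand, uint32_t Multiplier)
{
  uint32_t  lo = (uint32_t)Multiplicand;
  uint32_t  hi = (uint32_t)(Multiplicand >> 32);
  uint32_t  cross = (uint32_t)((uint64_t)hi * Multiplier);  /* imull, high half lost */
  return (uint64_t)lo * Multiplier + ((uint64_t)cross << 32);
}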

@@ -31,14 +31,14 @@
#------------------------------------------------------------------------------
ASM_PFX(InternalMathMultU64x64):
    push    %ebx
    movl    8(%esp), %ebx               # ebx <- M1[0..31]
    movl    16(%esp), %edx              # edx <- M2[0..31]
    movl    %ebx, %ecx
    movl    %edx, %eax
    imull   20(%esp), %ebx              # ebx <- M1[0..31] * M2[32..63]
    imull   12(%esp), %edx              # edx <- M1[32..63] * M2[0..31]
    addl    %edx, %ebx                  # carries are abandoned
    mull    %ecx                        # edx:eax <- M1[0..31] * M2[0..31]
    addl    %ebx, %edx                  # carries are abandoned
    pop     %ebx
    ret
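
The same idea for the full 64x64 case, sketched in C: three 32-bit multiplies, with every carry out of bit 63 discarded so the result is exact mod 2^64, matching the "carries are abandoned" comments. The helper name is illustrative.

#include <stdint.h>

static uint64_t
mult_u64x64 (uint64_t M1, uint64_t M2)
{
  uint32_t  a_lo = (uint32_t)M1, a_hi = (uint32_t)(M1 >> 32);
  uint32_t  b_lo = (uint32_t)M2, b_hi = (uint32_t)(M2 >> 32);
  /* two imulls plus addl: cross terms, carries abandoned at 32 bits */
  uint32_t  cross = (uint32_t)((uint64_t)a_lo * b_hi + (uint64_t)a_hi * b_lo);
  /* mull plus addl into the high dword */
  return (uint64_t)a_lo * b_lo + ((uint64_t)cross << 32);
}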

@@ -34,5 +34,5 @@
ASM_PFX(AsmMwait):
    movl    4(%esp), %eax
    movl    8(%esp), %ecx
    mwait   %eax, %ecx                  # mwait
    ret

@@ -38,8 +38,8 @@ ASM_PFX(InternalMathRRotU64):
    shrdl   %cl, %edx, %eax
    roll    %cl, %ebx
    shrdl   %cl, %ebx, %edx
    testb   $32, %cl                    # Count >= 32?
    cmovnz  %eax, %ecx                  # switch eax & edx if Count >= 32
    cmovnz  %edx, %eax
    cmovnz  %ecx, %edx
    pop     %ebx

@@ -33,10 +33,10 @@
#  );
#------------------------------------------------------------------------------
ASM_PFX(InternalMathRShiftU64):
    movb    12(%esp), %cl               # cl <- Count
    xorl    %edx, %edx
    movl    8(%esp), %eax
    testb   $32, %cl                    # Count >= 32?
    cmovz   %eax, %edx
    cmovz   0x4(%esp), %eax
    shrdl   %cl, %edx, %eax

@@ -30,15 +30,15 @@
#------------------------------------------------------------------------------
ASM_PFX(SetJump):
    pushl   0x4(%esp)
    call    ASM_PFX(InternalAssertJumpBuffer)   # To validate JumpBuffer
    pop     %ecx
    pop     %ecx                        # ecx <- return address
    movl    (%esp), %edx
    movl    %ebx, (%edx)
    movl    %esi, 4(%edx)
    movl    %edi, 8(%edx)
    movl    %ebp, 12(%edx)
    movl    %esp, 16(%edx)
    movl    %ecx, 20(%edx)              # eip value to restore in LongJump
    xorl    %eax, %eax
    jmp     *%ecx
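
A hedged usage sketch for the SetJump/LongJump pair, assuming the public declarations in MdePkg's <Library/BaseLib.h> (BASE_LIBRARY_JUMP_BUFFER, SetJump, LongJump); the function name here is illustrative. SetJump returns 0 on the direct call, and LongJump's Value, which must be non-zero, when control comes back through the saved context.

#include <Base.h>
#include <Library/BaseLib.h>

STATIC BASE_LIBRARY_JUMP_BUFFER  mJumpBuffer;

UINTN
JumpExample (
  VOID
  )
{
  UINTN  Status;

  Status = SetJump (&mJumpBuffer);      // 0 on the direct return
  if (Status == 0) {
    LongJump (&mJumpBuffer, 42);        // does not return; Value must be non-zero
  }
  return Status;                        // 42, delivered by LongJump
}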

@@ -31,8 +31,8 @@
#------------------------------------------------------------------------------
.globl ASM_PFX(InternalMathSwapBytes64)
ASM_PFX(InternalMathSwapBytes64):
    movl    8(%esp), %eax               # eax <- upper 32 bits
    movl    4(%esp), %edx               # edx <- lower 32 bits
    bswapl  %eax
    bswapl  %edx
    ret
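
Closing the set, a C sketch of the byte swap above: reverse the bytes within each dword and exchange the dwords; the asm gets the exchange for free by loading the halves into swapped registers. Helper names are illustrative.

#include <stdint.h>

static uint32_t
bswap32 (uint32_t x)                    /* what one bswapl does */
{
  return (x >> 24) | ((x >> 8) & 0x0000FF00u) |
         ((x << 8) & 0x00FF0000u) | (x << 24);
}

static uint64_t
swap_bytes_64 (uint64_t v)
{
  uint32_t  lo = (uint32_t)v;
  uint32_t  hi = (uint32_t)(v >> 32);
  return ((uint64_t)bswap32 (lo) << 32) | bswap32 (hi);
}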