lib: Allow compiling without FP support

Currently, we mandate the 'F' and 'D' extensions in riscv_fp.h so that
misaligned load/store emulation has access to the FP registers.

This is too restrictive; we should also allow compilation with soft-FP
toolchains and with an explicit PLATFORM_RISCV_ISA that does not include
the 'F' and 'D' extensions.

This patch extends riscv_fp.h and misaligned load/store emulation to
allow compiling OpenSBI without FP support.

Signed-off-by: Anup Patel <anup.patel@wdc.com>
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
Author: Anup Patel
Date:   2019-07-25 05:05:03 +00:00 (committed by Anup Patel)
Commit: 0f18b3fe0a (parent: 85546a5477)
3 changed files with 16 additions and 13 deletions
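
For context: everything in this change keys off the compiler-predefined
__riscv_flen macro, which hard-float RISC-V toolchains define (typically
to 32 or 64) and soft-FP toolchains leave undefined. A minimal
stand-alone probe, not part of the patch itself, showing which case a
given toolchain falls into:

/* Illustrative probe, not OpenSBI code: reports whether the toolchain
 * predefines __riscv_flen, the macro the guards below key on. */
#include <stdio.h>

int main(void)
{
#ifdef __riscv_flen
	printf("hard-float toolchain: FLEN = %d bits\n", __riscv_flen);
#else
	printf("soft-FP toolchain: no FP register file available\n");
#endif
	return 0;
}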

@@ -73,10 +73,6 @@
#define SET_FS_DIRTY() ((void)0)
#else
#error "Floating point emulation not supported.\n"
#endif
#define GET_F32_RS1(insn, regs) (GET_F32_REG(insn, 15, regs))
#define GET_F32_RS2(insn, regs) (GET_F32_REG(insn, 20, regs))
#define GET_F32_RS3(insn, regs) (GET_F32_REG(insn, 27, regs))
@@ -94,3 +90,5 @@
#define GET_F64_RS2S(insn, regs) (GET_F64_REG(RVC_RS2S(insn), 0, regs))
#endif
#endif

@@ -52,8 +52,6 @@ static int fp_init(u32 hartid)
{
#ifdef __riscv_flen
int i;
#else
unsigned long fd_mask;
#endif
if (!misa_extension('D') && !misa_extension('F'))
@@ -66,11 +64,6 @@ static int fp_init(u32 hartid)
for (i = 0; i < 32; i++)
init_fp_reg(i);
csr_write(CSR_FCSR, 0);
#else
fd_mask = (1 << ('F' - 'A')) | (1 << ('D' - 'A'));
csr_clear(CSR_MISA, fd_mask);
if (csr_read(CSR_MISA) & fd_mask)
return SBI_ENOTSUPP;
#endif
return 0;
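
Reading the markerless hunks above: assuming (as the before/after line
counts suggest) that the '#else'/fd_mask fallback is the code being
deleted, fp_init() is left with the misa check plus a register-init loop
that only exists in hard-float builds. A small self-contained sketch of
that shape; the helpers hart_has_fd() and clear_fp_reg() are hypothetical
stand-ins for misa_extension() and init_fp_reg(), not OpenSBI API:

#include <stdbool.h>

bool hart_has_fd(void)
{
	/* stand-in for the misa_extension('F') / misa_extension('D') checks */
	return true;
}

void clear_fp_reg(int i)
{
	/* stand-in for init_fp_reg(i), which zeroes FP register i */
	(void)i;
}

int fp_init_sketch(void)
{
#ifdef __riscv_flen
	int i;
#endif

	if (!hart_has_fd())
		return -1;	/* hart does not implement F/D at all */

#ifdef __riscv_flen
	/* Hard-float build only: zero all 32 FP registers (the real code
	 * also clears CSR_FCSR afterwards). */
	for (i = 0; i < 32; i++)
		clear_fp_reg(i);
#endif

	return 0;
}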

@@ -41,12 +41,14 @@ int sbi_misaligned_load_handler(u32 hartid, ulong mcause,
} else if ((insn & INSN_MASK_LWU) == INSN_MATCH_LWU) {
len = 4;
#endif
#ifdef __riscv_flen
} else if ((insn & INSN_MASK_FLD) == INSN_MATCH_FLD) {
fp = 1;
len = 8;
} else if ((insn & INSN_MASK_FLW) == INSN_MATCH_FLW) {
fp = 1;
len = 4;
#endif
} else if ((insn & INSN_MASK_LH) == INSN_MATCH_LH) {
len = 2;
shift = 8 * (sizeof(ulong) - len);
@@ -71,6 +73,7 @@ int sbi_misaligned_load_handler(u32 hartid, ulong mcause,
((insn >> SH_RD) & 0x1f)) {
len = 4;
shift = 8 * (sizeof(ulong) - len);
#ifdef __riscv_flen
} else if ((insn & INSN_MASK_C_FLD) == INSN_MATCH_C_FLD) {
fp = 1;
len = 8;
@@ -87,9 +90,11 @@ int sbi_misaligned_load_handler(u32 hartid, ulong mcause,
fp = 1;
len = 4;
#endif
#endif
#endif
} else
return SBI_EILL;
return sbi_trap_redirect(regs, scratch, regs->mepc,
mcause, addr);
val.data_u64 = 0;
for (i = 0; i < len; i++) {
@@ -104,10 +109,12 @@ int sbi_misaligned_load_handler(u32 hartid, ulong mcause,
if (!fp)
SET_RD(insn, regs, val.data_ulong << shift >> shift);
#ifdef __riscv_flen
else if (len == 8)
SET_F64_RD(insn, regs, val.data_u64);
else
SET_F32_RD(insn, regs, val.data_ulong);
#endif
regs->mepc += INSN_LEN(insn);
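
Both handlers funnel the accessed bytes through 'val', filling
data_bytes[] one byte at a time and then reading the result back through
whichever member matches the destination width. A stand-alone sketch of
that kind of union follows; the union name and typedefs are assumptions
for illustration, the member names come from the hunks above, and the
real definition in the source file may differ:

#include <stdint.h>
#include <stdio.h>

/* OpenSBI-style fixed-width names, re-declared so the sketch is
 * self-contained. */
typedef uint8_t  u8;
typedef uint64_t u64;

/* Hypothetical union name; member names match those used in the diff. */
union ldst_data {
	u64           data_u64;
	unsigned long data_ulong;
	u8            data_bytes[8];
};

int main(void)
{
	union ldst_data val;
	const u8 src[8] = { 0x88, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11 };
	int i;

	/* Mimic the handler: clear the widest view, then fill byte by byte
	 * (this loop stands in for the per-byte load_u8() calls). */
	val.data_u64 = 0;
	for (i = 0; i < 8; i++)
		val.data_bytes[i] = src[i];

	/* RISC-V is little-endian, so this prints 0x1122334455667788. */
	printf("0x%016llx\n", (unsigned long long)val.data_u64);

	return 0;
}
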
@@ -132,12 +139,14 @@ int sbi_misaligned_store_handler(u32 hartid, ulong mcause,
} else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
len = 8;
#endif
#ifdef __riscv_flen
} else if ((insn & INSN_MASK_FSD) == INSN_MATCH_FSD) {
len = 8;
val.data_u64 = GET_F64_RS2(insn, regs);
} else if ((insn & INSN_MASK_FSW) == INSN_MATCH_FSW) {
len = 4;
val.data_ulong = GET_F32_RS2(insn, regs);
#endif
} else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
len = 2;
#ifdef __riscv_compressed
@@ -157,6 +166,7 @@ int sbi_misaligned_store_handler(u32 hartid, ulong mcause,
((insn >> SH_RD) & 0x1f)) {
len = 4;
val.data_ulong = GET_RS2C(insn, regs);
#ifdef __riscv_flen
} else if ((insn & INSN_MASK_C_FSD) == INSN_MATCH_C_FSD) {
len = 8;
val.data_u64 = GET_F64_RS2S(insn, regs);
@@ -171,9 +181,11 @@ int sbi_misaligned_store_handler(u32 hartid, ulong mcause,
len = 4;
val.data_ulong = GET_F32_RS2C(insn, regs);
#endif
#endif
#endif
} else
return SBI_EILL;
return sbi_trap_redirect(regs, scratch, regs->mepc,
mcause, addr);
for (i = 0; i < len; i++) {
store_u8((void *)(addr + i), val.data_bytes[i],