tcg: Add tcg_gen_atomic_{xchg,fetch_and,fetch_or}_i128

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20250815122653.701782-5-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Author:    Richard Henderson
Date:      2025-08-15 22:26:50 +10:00
Committer: Peter Maydell
Parent:    33aefd187e
Commit:    092ac2481a

5 changed files with 125 additions and 3 deletions
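
For orientation, a hypothetical front-end use of the new 128-bit exchange generator could look like the sketch below. The function name, the choice of MemOp flags and the 'mem_idx' parameter are illustrative only and are not part of this commit.

/*
 * Hypothetical usage sketch: atomically swap the 128-bit value formed
 * from lo:hi with memory at 'addr', leaving the previous memory
 * contents in lo/hi.  'mem_idx' is the guest MMU index to use.
 */
static void gen_xchg_i128_example(TCGv addr, TCGv_i64 lo, TCGv_i64 hi,
                                  int mem_idx)
{
    TCGv_i128 newv = tcg_temp_new_i128();
    TCGv_i128 oldv = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(newv, lo, hi);
    tcg_gen_atomic_xchg_i128(oldv, addr, newv, mem_idx,
                             MO_128 | MO_LE | MO_ALIGN);
    tcg_gen_extr_i128_i64(lo, hi, oldv);
}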

@@ -122,5 +122,14 @@ GEN_ATOMIC_HELPERS(umax_fetch)
GEN_ATOMIC_HELPERS(xchg)
#if HAVE_CMPXCHG128
ATOMIC_HELPER(xchgo_be, Int128)
ATOMIC_HELPER(xchgo_le, Int128)
ATOMIC_HELPER(fetch_ando_be, Int128)
ATOMIC_HELPER(fetch_ando_le, Int128)
ATOMIC_HELPER(fetch_oro_be, Int128)
ATOMIC_HELPER(fetch_oro_le, Int128)
#endif
#undef ATOMIC_HELPER
#undef GEN_ATOMIC_HELPERS
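
Each new ATOMIC_HELPER(..., Int128) line above expands, through the pre-existing ATOMIC_HELPER macro in this file, into a thin out-of-line wrapper along the lines of the sketch below. The exact shape is dictated by that macro, and the sketch assumes the corresponding 16-byte cpu_atomic_*_mmu primitive is provided elsewhere in this series.

/*
 * Rough expansion sketch of ATOMIC_HELPER(xchgo_be, Int128); the real
 * wrapper comes from the existing ATOMIC_HELPER macro, and
 * cpu_atomic_xchgo_be_mmu is assumed to exist for 16-byte operands.
 */
Int128 helper_atomic_xchgo_be(CPUArchState *env, uint64_t addr,
                              Int128 val, uint32_t oi)
{
    return cpu_atomic_xchgo_be_mmu(env, addr, val, oi, GETPC());
}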

@@ -63,6 +63,18 @@ DEF_HELPER_FLAGS_5(atomic_cmpxchgo_be, TCG_CALL_NO_WG,
                   i128, env, i64, i128, i128, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgo_le, TCG_CALL_NO_WG,
                   i128, env, i64, i128, i128, i32)
DEF_HELPER_FLAGS_4(atomic_xchgo_be, TCG_CALL_NO_WG,
                   i128, env, i64, i128, i32)
DEF_HELPER_FLAGS_4(atomic_xchgo_le, TCG_CALL_NO_WG,
                   i128, env, i64, i128, i32)
DEF_HELPER_FLAGS_4(atomic_fetch_ando_be, TCG_CALL_NO_WG,
                   i128, env, i64, i128, i32)
DEF_HELPER_FLAGS_4(atomic_fetch_ando_le, TCG_CALL_NO_WG,
                   i128, env, i64, i128, i32)
DEF_HELPER_FLAGS_4(atomic_fetch_oro_be, TCG_CALL_NO_WG,
                   i128, env, i64, i128, i32)
DEF_HELPER_FLAGS_4(atomic_fetch_oro_le, TCG_CALL_NO_WG,
                   i128, env, i64, i128, i32)
#endif
DEF_HELPER_FLAGS_5(nonatomic_cmpxchgo, TCG_CALL_NO_WG,

@@ -344,6 +344,8 @@ void tcg_gen_atomic_xchg_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
                                 TCGArg, MemOp, TCGType);
void tcg_gen_atomic_xchg_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
                                 TCGArg, MemOp, TCGType);
void tcg_gen_atomic_xchg_i128_chk(TCGv_i128, TCGTemp *, TCGv_i128,
                                  TCGArg, MemOp, TCGType);
void tcg_gen_atomic_fetch_add_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
                                      TCGArg, MemOp, TCGType);
@@ -411,6 +413,11 @@ void tcg_gen_atomic_umax_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
void tcg_gen_atomic_umax_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
                                       TCGArg, MemOp, TCGType);
void tcg_gen_atomic_fetch_and_i128_chk(TCGv_i128, TCGTemp *, TCGv_i128,
                                       TCGArg, MemOp, TCGType);
void tcg_gen_atomic_fetch_or_i128_chk(TCGv_i128, TCGTemp *, TCGv_i128,
                                      TCGArg, MemOp, TCGType);

/* Vector ops */

void tcg_gen_mov_vec(TCGv_vec, TCGv_vec);

@@ -134,13 +134,16 @@ DEF_ATOMIC3(tcg_gen_nonatomic_cmpxchg, i128)
DEF_ATOMIC2(tcg_gen_atomic_xchg, i32)
DEF_ATOMIC2(tcg_gen_atomic_xchg, i64)
DEF_ATOMIC2(tcg_gen_atomic_xchg, i128)
DEF_ATOMIC2(tcg_gen_atomic_fetch_add, i32)
DEF_ATOMIC2(tcg_gen_atomic_fetch_add, i64)
DEF_ATOMIC2(tcg_gen_atomic_fetch_and, i32)
DEF_ATOMIC2(tcg_gen_atomic_fetch_and, i64)
DEF_ATOMIC2(tcg_gen_atomic_fetch_and, i128)
DEF_ATOMIC2(tcg_gen_atomic_fetch_or, i32)
DEF_ATOMIC2(tcg_gen_atomic_fetch_or, i64)
DEF_ATOMIC2(tcg_gen_atomic_fetch_or, i128)
DEF_ATOMIC2(tcg_gen_atomic_fetch_xor, i32)
DEF_ATOMIC2(tcg_gen_atomic_fetch_xor, i64)
DEF_ATOMIC2(tcg_gen_atomic_fetch_smin, i32)
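
DEF_ATOMIC2 generates the target-facing wrapper around the corresponding _chk function, so DEF_ATOMIC2(tcg_gen_atomic_xchg, i128) expands roughly as in the sketch below; the actual macro body is defined earlier in the same header and is unchanged by this commit.

/*
 * Approximate expansion of DEF_ATOMIC2(tcg_gen_atomic_xchg, i128): the
 * public wrapper forwards the TCGv address as a TCGTemp plus the guest
 * address type to the checked variant added by this commit.
 */
static inline void tcg_gen_atomic_xchg_i128(TCGv_i128 r, TCGv a, TCGv_i128 v,
                                            TCGArg i, MemOp m)
{
    tcg_gen_atomic_xchg_i128_chk(r, tcgv_tl_temp(a), v, i, m, TCG_TYPE_TL);
}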

@@ -801,6 +801,8 @@ typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv_i64,
                                  TCGv_i32, TCGv_i32);
typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv_i64,
                                  TCGv_i64, TCGv_i32);
typedef void (*gen_atomic_op_i128)(TCGv_i128, TCGv_env, TCGv_i64,
                                   TCGv_i128, TCGv_i32);

#ifdef CONFIG_ATOMIC64
# define WITH_ATOMIC64(X) X,
@@ -1201,6 +1203,94 @@ static void do_atomic_op_i64(TCGv_i64 ret, TCGTemp *addr, TCGv_i64 val,
    }
}

static void do_nonatomic_op_i128(TCGv_i128 ret, TCGTemp *addr, TCGv_i128 val,
                                 TCGArg idx, MemOp memop, bool new_val,
                                 void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i128 t = tcg_temp_ebb_new_i128();
    TCGv_i128 r = tcg_temp_ebb_new_i128();

    tcg_gen_qemu_ld_i128_int(r, addr, idx, memop);

    gen(TCGV128_LOW(t), TCGV128_LOW(r), TCGV128_LOW(val));
    gen(TCGV128_HIGH(t), TCGV128_HIGH(r), TCGV128_HIGH(val));

    tcg_gen_qemu_st_i128_int(t, addr, idx, memop);
    tcg_gen_mov_i128(ret, r);

    tcg_temp_free_i128(t);
    tcg_temp_free_i128(r);
}

static void do_atomic_op_i128(TCGv_i128 ret, TCGTemp *addr, TCGv_i128 val,
                              TCGArg idx, MemOp memop, void * const table[])
{
    gen_atomic_op_i128 gen = table[memop & (MO_SIZE | MO_BSWAP)];

    if (gen) {
        MemOpIdx oi = make_memop_idx(memop & ~MO_SIGN, idx);
        TCGv_i64 a64 = maybe_extend_addr64(addr);
        gen(ret, tcg_env, a64, val, tcg_constant_i32(oi));
        maybe_free_addr64(a64);
        return;
    }

    gen_helper_exit_atomic(tcg_env);

    /* Produce a result */
    tcg_gen_movi_i64(TCGV128_LOW(ret), 0);
    tcg_gen_movi_i64(TCGV128_HIGH(ret), 0);
}

#define GEN_ATOMIC_HELPER128(NAME, OP, NEW) \
static void * const table_##NAME[(MO_SIZE | MO_BSWAP) + 1] = { \
    [MO_8] = gen_helper_atomic_##NAME##b, \
    [MO_16 | MO_LE] = gen_helper_atomic_##NAME##w_le, \
    [MO_16 | MO_BE] = gen_helper_atomic_##NAME##w_be, \
    [MO_32 | MO_LE] = gen_helper_atomic_##NAME##l_le, \
    [MO_32 | MO_BE] = gen_helper_atomic_##NAME##l_be, \
    WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_##NAME##q_le) \
    WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be) \
    WITH_ATOMIC128([MO_128 | MO_LE] = gen_helper_atomic_##NAME##o_le) \
    WITH_ATOMIC128([MO_128 | MO_BE] = gen_helper_atomic_##NAME##o_be) \
}; \
void tcg_gen_atomic_##NAME##_i32_chk(TCGv_i32 ret, TCGTemp *addr, \
                                     TCGv_i32 val, TCGArg idx, \
                                     MemOp memop, TCGType addr_type) \
{ \
    tcg_debug_assert(addr_type == tcg_ctx->addr_type); \
    tcg_debug_assert((memop & MO_SIZE) <= MO_32); \
    if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) { \
        do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME); \
    } else { \
        do_nonatomic_op_i32(ret, addr, val, idx, memop, NEW, \
                            tcg_gen_##OP##_i32); \
    } \
} \
void tcg_gen_atomic_##NAME##_i64_chk(TCGv_i64 ret, TCGTemp *addr, \
                                     TCGv_i64 val, TCGArg idx, \
                                     MemOp memop, TCGType addr_type) \
{ \
    tcg_debug_assert(addr_type == tcg_ctx->addr_type); \
    tcg_debug_assert((memop & MO_SIZE) <= MO_64); \
    if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) { \
        do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME); \
    } else { \
        do_nonatomic_op_i64(ret, addr, val, idx, memop, NEW, \
                            tcg_gen_##OP##_i64); \
    } \
} \
void tcg_gen_atomic_##NAME##_i128_chk(TCGv_i128 ret, TCGTemp *addr, \
                                      TCGv_i128 val, TCGArg idx, \
                                      MemOp memop, TCGType addr_type) \
{ \
    tcg_debug_assert(addr_type == tcg_ctx->addr_type); \
    tcg_debug_assert((memop & MO_SIZE) == MO_128); \
    if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) { \
        do_atomic_op_i128(ret, addr, val, idx, memop, table_##NAME); \
    } else { \
        do_nonatomic_op_i128(ret, addr, val, idx, memop, NEW, \
                             tcg_gen_##OP##_i64); \
    } \
}

#define GEN_ATOMIC_HELPER(NAME, OP, NEW) \
static void * const table_##NAME[(MO_SIZE | MO_BSWAP) + 1] = { \
    [MO_8] = gen_helper_atomic_##NAME##b, \
@@ -1239,8 +1329,8 @@ void tcg_gen_atomic_##NAME##_i64_chk(TCGv_i64 ret, TCGTemp *addr, \
}

GEN_ATOMIC_HELPER(fetch_add, add, 0)
-GEN_ATOMIC_HELPER(fetch_and, and, 0)
-GEN_ATOMIC_HELPER(fetch_or, or, 0)
+GEN_ATOMIC_HELPER128(fetch_and, and, 0)
+GEN_ATOMIC_HELPER128(fetch_or, or, 0)
GEN_ATOMIC_HELPER(fetch_xor, xor, 0)
GEN_ATOMIC_HELPER(fetch_smin, smin, 0)
GEN_ATOMIC_HELPER(fetch_umin, umin, 0)
@@ -1266,6 +1356,7 @@ static void tcg_gen_mov2_i64(TCGv_i64 r, TCGv_i64 a, TCGv_i64 b)
    tcg_gen_mov_i64(r, b);
}

-GEN_ATOMIC_HELPER(xchg, mov2, 0)
+GEN_ATOMIC_HELPER128(xchg, mov2, 0)

#undef GEN_ATOMIC_HELPER
+#undef GEN_ATOMIC_HELPER128
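
Taken together: when translating with CF_PARALLEL, these generators dispatch to the new out-of-line atomic_{xchgo,fetch_ando,fetch_oro}_{be,le} helpers, falling back to gen_helper_exit_atomic (re-execution in the exclusive context) when no 128-bit atomic helper is available; without CF_PARALLEL they are expanded via two 64-bit operations between a 128-bit load and store. As with the exchange sketch earlier, a hypothetical front-end use of the fetch-or flavour, with illustrative names only, might be:

/*
 * Hypothetical usage sketch: atomically OR lo:hi into the 128-bit value
 * at 'addr' and return the previous contents in lo/hi, as a front end
 * might do for a 128-bit atomic bit-set instruction.
 */
static void gen_fetch_or_i128_example(TCGv addr, TCGv_i64 lo, TCGv_i64 hi,
                                      int mem_idx)
{
    TCGv_i128 mask = tcg_temp_new_i128();
    TCGv_i128 oldv = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(mask, lo, hi);
    tcg_gen_atomic_fetch_or_i128(oldv, addr, mask, mem_idx,
                                 MO_128 | MO_LE | MO_ALIGN);
    tcg_gen_extr_i128_i64(lo, hi, oldv);
}

The fetch-and generator is used the same way, with tcg_gen_atomic_fetch_and_i128 in place of the OR variant.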