Convert SVE macros into C++ constants and inlines

This patch updates LLDB's in-house version of the SVE ptrace/sigcontext macros by
converting them into constants and inline functions. They are housed in the sve
namespace and are used by the elf-core process plugin for reading SVE register data.

Reviewed By: labath

Differential Revision: https://reviews.llvm.org/D85641
Author: Muhammad Omair Javaid
Date:   2020-08-19 12:27:02 +05:00
parent 5b797eb5b4
commit 090306fc80
2 changed files with 162 additions and 136 deletions
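
As an illustrative sketch (not part of the diff), a call site that previously
expanded the kernel-style macros now calls the namespaced helpers instead; the
include path and the wrapper function below are assumptions:

// Hypothetical caller, shown only to illustrate the macro-to-inline conversion.
#include "Plugins/Process/Utility/LinuxPTraceDefines_arm64sve.h" // assumed path

static uint32_t GetZ0PtraceOffset(uint16_t vl) {
  // Before: SVE_PT_SVE_ZREG_OFFSET(sve_vq_from_vl(vl), 0)
  // After: the same computation through inline functions in the sve namespace.
  uint16_t vq = lldb_private::sve::vq_from_vl(vl);
  return lldb_private::sve::PTraceZRegOffset(vq, 0);
}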

File: lldb/source/Plugins/Process/Utility/LinuxPTraceDefines_arm64sve.h

@@ -9,60 +9,57 @@
#ifndef LLDB_SOURCE_PLUGINS_PROCESS_UTILITY_LINUXPTRACEDEFINES_ARM64SVE_H
#define LLDB_SOURCE_PLUGINS_PROCESS_UTILITY_LINUXPTRACEDEFINES_ARM64SVE_H
// LinuxPTraceDefines_arm64sve.h defines essential macros for manipulating
// AArch64 SVE core dump registers. Add guard for aarch64/Linux hosts where
// newer versions of ptrace.h or sigcontext.h might already define SVE macros.
#ifndef SVE_SIG_REGS_OFFSET
#include <stdint.h>
struct aarch64_context {
uint16_t magic;
uint16_t size;
};
#define SVE_MAGIC 0x53564501
struct sve_context {
struct aarch64_context head;
uint16_t vl;
uint16_t reserved[3];
};
namespace lldb_private {
namespace sve {
/*
* The SVE architecture leaves space for future expansion of the
* vector length beyond its initial architectural limit of 2048 bits
* (16 quadwords).
*
* See linux/Documentation/arm64/sve.txt for a description of the VL/VQ
* terminology.
* See <Linux kernel source tree>/Documentation/arm64/sve.rst for a description
* of the vl/vq terminology.
*/
#define SVE_VQ_BYTES 16 /* number of bytes per quadword */
#define SVE_VQ_MIN 1
#define SVE_VQ_MAX 512
const uint16_t vq_bytes = 16; /* number of bytes per quadword */
#define SVE_VL_MIN (SVE_VQ_MIN * SVE_VQ_BYTES)
#define SVE_VL_MAX (SVE_VQ_MAX * SVE_VQ_BYTES)
const uint16_t vq_min = 1;
const uint16_t vq_max = 512;
#define SVE_NUM_ZREGS 32
#define SVE_NUM_PREGS 16
const uint16_t vl_min = vq_min * vq_bytes;
const uint16_t vl_max = vq_max * vq_bytes;
#define sve_vl_valid(vl) \
((vl) % SVE_VQ_BYTES == 0 && (vl) >= SVE_VL_MIN && (vl) <= SVE_VL_MAX)
#define sve_vq_from_vl(vl) ((vl) / SVE_VQ_BYTES)
#define sve_vl_from_vq(vq) ((vq)*SVE_VQ_BYTES)
const uint16_t num_of_zregs = 32;
const uint16_t num_of_pregs = 16;
inline uint16_t vl_valid(uint16_t vl) {
return (vl % vq_bytes == 0 && vl >= vl_min && vl <= vl_max);
}
inline uint16_t vq_from_vl(uint16_t vl) { return vl / vq_bytes; }
inline uint16_t vl_from_vq(uint16_t vq) { return vq * vq_bytes; }
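// Illustrative worked example (not part of this patch), written as if inside
// the sve namespace and assuming <cassert> is available: for 256-bit vectors
// the kernel reports vl = 32 bytes.
inline void ExampleVlVqRoundTrip() {
  const uint16_t vl = 32;       // 256-bit vector length in bytes
  assert(vl_valid(vl));         // a multiple of 16 bytes within [vl_min, vl_max]
  assert(vq_from_vl(vl) == 2);  // two 16-byte quadwords
  assert(vl_from_vq(2) == vl);  // round-trips back to 32 bytes
}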
/* A new signal frame record sve_context encodes the SVE Registers on signal
* delivery. sve_context struct definition may be included in asm/sigcontext.h.
* We define sve_context_size which will be used by LLDB sve helper functions.
* More information on sve_context can be found in Linux kernel source tree at
* Documentation/arm64/sve.rst.
*/
const uint16_t sve_context_size = 16;
/*
* If the SVE registers are currently live for the thread at signal delivery,
* sve_context.head.size >=
* SVE_SIG_CONTEXT_SIZE(sve_vq_from_vl(sve_context.vl))
* and the register data may be accessed using the SVE_SIG_*() macros.
* SigContextSize(vq_from_vl(sve_context.vl))
* and the register data may be accessed using the Sig*() functions.
*
* If sve_context.head.size <
* SVE_SIG_CONTEXT_SIZE(sve_vq_from_vl(sve_context.vl)),
* SigContextSize(vq_from_vl(sve_context.vl)),
* the SVE registers were not live for the thread and no register data
* is included: in this case, the SVE_SIG_*() macros should not be
* is included: in this case, the Sig*() functions should not be
* used except for this check.
*
* The same convention applies when returning from a signal: a caller
@@ -76,27 +73,27 @@ struct sve_context {
* doing a sigreturn.
*
*
* Note: for all these macros, the "vq" argument denotes the SVE
* Note: for all these functions, the "vq" argument denotes the SVE
* vector length in quadwords (i.e., units of 128 bits).
*
* The correct way to obtain vq is to use sve_vq_from_vl(vl). The
* result is valid if and only if sve_vl_valid(vl) is true. This is
* The correct way to obtain vq is to use vq_from_vl(vl). The
* result is valid if and only if vl_valid(vl) is true. This is
* guaranteed for a struct sve_context written by the kernel.
*
*
* Additional macros describe the contents and layout of the payload.
* For each, SVE_SIG_x_OFFSET(args) is the start offset relative to
* the start of struct sve_context, and SVE_SIG_x_SIZE(args) is the
* Additional functions describe the contents and layout of the payload.
* For each, Sig*Offset(args) is the start offset relative to
* the start of struct sve_context, and Sig*Size(args) is the
* size in bytes:
*
* x type description
* - ---- -----------
* REGS the entire SVE context
*
* ZREGS __uint128_t[SVE_NUM_ZREGS][vq] all Z-registers
* ZREGS __uint128_t[num_of_zregs][vq] all Z-registers
* ZREG __uint128_t[vq] individual Z-register Zn
*
* PREGS uint16_t[SVE_NUM_PREGS][vq] all P-registers
* PREGS uint16_t[num_of_pregs][vq] all P-registers
* PREG uint16_t[vq] individual P-register Pn
*
* FFR uint16_t[vq] first-fault status register
@@ -104,35 +101,47 @@ struct sve_context {
* Additional data might be appended in the future.
*/
#define SVE_SIG_ZREG_SIZE(vq) ((uint32_t)(vq)*SVE_VQ_BYTES)
#define SVE_SIG_PREG_SIZE(vq) ((uint32_t)(vq) * (SVE_VQ_BYTES / 8))
#define SVE_SIG_FFR_SIZE(vq) SVE_SIG_PREG_SIZE(vq)
inline uint16_t SigZRegSize(uint16_t vq) { return vq * vq_bytes; }
inline uint16_t SigPRegSize(uint16_t vq) { return vq * vq_bytes / 8; }
inline uint16_t SigFFRSize(uint16_t vq) { return SigPRegSize(vq); }
#define SVE_SIG_REGS_OFFSET \
((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1)) / SVE_VQ_BYTES * \
SVE_VQ_BYTES)
inline uint32_t SigRegsOffset() {
return (sve_context_size + vq_bytes - 1) / vq_bytes * vq_bytes;
}
#define SVE_SIG_ZREGS_OFFSET SVE_SIG_REGS_OFFSET
#define SVE_SIG_ZREG_OFFSET(vq, n) \
(SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREG_SIZE(vq) * (n))
#define SVE_SIG_ZREGS_SIZE(vq) \
(SVE_SIG_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_SIG_ZREGS_OFFSET)
inline uint32_t SigZRegsOffset() { return SigRegsOffset(); }
#define SVE_SIG_PREGS_OFFSET(vq) (SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREGS_SIZE(vq))
#define SVE_SIG_PREG_OFFSET(vq, n) \
(SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREG_SIZE(vq) * (n))
#define SVE_SIG_PREGS_SIZE(vq) \
(SVE_SIG_PREG_OFFSET(vq, SVE_NUM_PREGS) - SVE_SIG_PREGS_OFFSET(vq))
inline uint32_t SigZRegOffset(uint16_t vq, uint16_t n) {
return SigRegsOffset() + SigZRegSize(vq) * n;
}
#define SVE_SIG_FFR_OFFSET(vq) \
(SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREGS_SIZE(vq))
inline uint32_t SigZRegsSize(uint16_t vq) {
return SigZRegOffset(vq, num_of_zregs) - SigRegsOffset();
}
#define SVE_SIG_REGS_SIZE(vq) \
(SVE_SIG_FFR_OFFSET(vq) + SVE_SIG_FFR_SIZE(vq) - SVE_SIG_REGS_OFFSET)
inline uint32_t SigPRegsOffset(uint16_t vq) {
return SigRegsOffset() + SigZRegsSize(vq);
}
#define SVE_SIG_CONTEXT_SIZE(vq) (SVE_SIG_REGS_OFFSET + SVE_SIG_REGS_SIZE(vq))
inline uint32_t SigPRegOffset(uint16_t vq, uint16_t n) {
return SigPRegsOffset(vq) + SigPRegSize(vq) * n;
}
/* SVE/FP/SIMD state (NT_ARM_SVE) */
inline uint32_t SigpRegsSize(uint16_t vq) {
return SigPRegOffset(vq, num_of_pregs) - SigPRegsOffset(vq);
}
inline uint32_t SigFFROffset(uint16_t vq) {
return SigPRegsOffset(vq) + SigpRegsSize(vq);
}
inline uint32_t SigRegsSize(uint16_t vq) {
return SigFFROffset(vq) + SigFFRSize(vq) - SigRegsOffset();
}
inline uint32_t SVESigContextSize(uint16_t vq) {
return SigRegsOffset() + SigRegsSize(vq);
}
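// Illustrative worked example (not part of this patch): concrete signal-frame
// offsets for vq = 4 (512-bit vectors), written as if inside the sve namespace
// and assuming <cassert> is available.
inline void ExampleSigLayoutForVq4() {
  const uint16_t vq = 4;
  assert(SigRegsOffset() == 16);         // sve_context_size rounded up to 16
  assert(SigZRegOffset(vq, 0) == 16);    // Z0 immediately follows the header
  assert(SigZRegsSize(vq) == 32 * 64);   // 32 Z-registers of 64 bytes each
  assert(SigPRegsOffset(vq) == 2064);    // P-registers follow the Z-registers
  assert(SigFFROffset(vq) == 2192);      // FFR follows 16 P-registers of 8 bytes
  assert(SVESigContextSize(vq) == 2200); // header plus full register payload
}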
struct user_sve_header {
uint32_t size; /* total meaningful regset content in bytes */
@@ -144,22 +153,14 @@ struct user_sve_header {
};
/* Definitions for user_sve_header.flags: */
#define SVE_PT_REGS_MASK (1 << 0)
#define SVE_PT_REGS_FPSIMD 0
#define SVE_PT_REGS_SVE SVE_PT_REGS_MASK
/*
* Common SVE_PT_* flags:
* These must be kept in sync with prctl interface in <linux/ptrace.h>
*/
#define SVE_PT_VL_INHERIT (PR_SVE_VL_INHERIT >> 16)
#define SVE_PT_VL_ONEXEC (PR_SVE_SET_VL_ONEXEC >> 16)
const uint16_t ptrace_regs_mask = 1 << 0;
const uint16_t ptrace_regs_fpsimd = 0;
const uint16_t ptrace_regs_sve = ptrace_regs_mask;
/*
* The remainder of the SVE state follows struct user_sve_header. The
* total size of the SVE state (including header) depends on the
* metadata in the header: SVE_PT_SIZE(vq, flags) gives the total size
* metadata in the header: PTraceSize(vq, flags) gives the total size
* of the state in bytes, including the header.
*
* Refer to <asm/sigcontext.h> for details of how to pass the correct
@@ -167,9 +168,9 @@ struct user_sve_header {
*/
/* Offset from the start of struct user_sve_header to the register data */
#define SVE_PT_REGS_OFFSET \
((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1)) / SVE_VQ_BYTES * \
SVE_VQ_BYTES)
inline uint16_t PTraceRegsOffset() {
return (sizeof(struct user_sve_header) + vq_bytes - 1) / vq_bytes * vq_bytes;
}
/*
* The register data content and layout depends on the value of the
@@ -177,28 +178,29 @@ struct user_sve_header {
*/
/*
* (flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD case:
* (flags & ptrace_regs_mask) == ptrace_regs_fpsimd case:
*
* The payload starts at offset SVE_PT_FPSIMD_OFFSET, and is of type
* The payload starts at offset PTraceFPSIMDOffset, and is of type
* struct user_fpsimd_state. Additional data might be appended in the
* future: use SVE_PT_FPSIMD_SIZE(vq, flags) to compute the total size.
* SVE_PT_FPSIMD_SIZE(vq, flags) will never be less than
* future: use PTraceFPSIMDSize(vq, flags) to compute the total size.
* PTraceFPSIMDSize(vq, flags) will never be less than
* sizeof(struct user_fpsimd_state).
*/
#define SVE_PT_FPSIMD_OFFSET SVE_PT_REGS_OFFSET
const uint32_t ptrace_fpsimd_offset = PTraceRegsOffset();
#define SVE_PT_FPSIMD_SIZE(vq, flags) (sizeof(struct user_fpsimd_state))
/* Return size of struct user_fpsimd_state from asm/ptrace.h */
inline uint32_t PTraceFPSIMDSize(uint16_t vq, uint16_t flags) { return 528; }
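// Illustrative sketch (not part of this patch) of the assumed kernel
// user_fpsimd_state layout behind the hard-coded 528 bytes above.
struct example_user_fpsimd_state_shape {
  __uint128_t vregs[32]; // 512 bytes of V registers
  uint32_t fpsr;         //   4 bytes of status
  uint32_t fpcr;         //   4 bytes of control
  uint32_t reserved[2];  //   8 bytes reserved, preserving 16-byte alignment
};
static_assert(sizeof(example_user_fpsimd_state_shape) == 528,
              "matches the value returned by PTraceFPSIMDSize");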
/*
* (flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE case:
* (flags & ptrace_regs_mask) == ptrace_regs_sve case:
*
* The payload starts at offset SVE_PT_SVE_OFFSET, and is of size
* SVE_PT_SVE_SIZE(vq, flags).
* The payload starts at offset PTraceSVEOffset, and is of size
* PTraceSVESize(vq, flags).
*
* Additional macros describe the contents and layout of the payload.
* For each, SVE_PT_SVE_x_OFFSET(args) is the start offset relative to
* the start of struct user_sve_header, and SVE_PT_SVE_x_SIZE(args) is
* Additional functions describe the contents and layout of the payload.
* For each, PTrace*X*Offset(args) is the start offset relative to
* the start of struct user_sve_header, and PTrace*X*Size(args) is
* the size in bytes:
*
* x type description
@@ -215,53 +217,75 @@ struct user_sve_header {
* Additional data might be appended in the future.
*/
#define SVE_PT_SVE_ZREG_SIZE(vq) SVE_SIG_ZREG_SIZE(vq)
#define SVE_PT_SVE_PREG_SIZE(vq) SVE_SIG_PREG_SIZE(vq)
#define SVE_PT_SVE_FFR_SIZE(vq) SVE_SIG_FFR_SIZE(vq)
#define SVE_PT_SVE_FPSR_SIZE sizeof(uint32_t)
#define SVE_PT_SVE_FPCR_SIZE sizeof(uint32_t)
inline uint32_t PTraceZRegSize(uint16_t vq) { return SigZRegSize(vq); }
#define __SVE_SIG_TO_PT(offset) \
((offset)-SVE_SIG_REGS_OFFSET + SVE_PT_REGS_OFFSET)
inline uint32_t PTracePRegSize(uint16_t vq) { return SigPRegSize(vq); }
#define SVE_PT_SVE_OFFSET SVE_PT_REGS_OFFSET
inline uint32_t PTraceFFRSize(uint16_t vq) { return SigFFRSize(vq); }
#define SVE_PT_SVE_ZREGS_OFFSET __SVE_SIG_TO_PT(SVE_SIG_ZREGS_OFFSET)
#define SVE_PT_SVE_ZREG_OFFSET(vq, n) \
__SVE_SIG_TO_PT(SVE_SIG_ZREG_OFFSET(vq, n))
#define SVE_PT_SVE_ZREGS_SIZE(vq) \
(SVE_PT_SVE_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_PT_SVE_ZREGS_OFFSET)
const uint32_t fpsr_size = sizeof(uint32_t);
const uint32_t fpcr_size = sizeof(uint32_t);
#define SVE_PT_SVE_PREGS_OFFSET(vq) __SVE_SIG_TO_PT(SVE_SIG_PREGS_OFFSET(vq))
#define SVE_PT_SVE_PREG_OFFSET(vq, n) \
__SVE_SIG_TO_PT(SVE_SIG_PREG_OFFSET(vq, n))
#define SVE_PT_SVE_PREGS_SIZE(vq) \
(SVE_PT_SVE_PREG_OFFSET(vq, SVE_NUM_PREGS) - SVE_PT_SVE_PREGS_OFFSET(vq))
inline uint32_t SigToPTrace(uint32_t offset) {
return offset - SigRegsOffset() + PTraceRegsOffset();
}
#define SVE_PT_SVE_FFR_OFFSET(vq) __SVE_SIG_TO_PT(SVE_SIG_FFR_OFFSET(vq))
const uint32_t ptrace_sve_offset = PTraceRegsOffset();
#define SVE_PT_SVE_FPSR_OFFSET(vq) \
((SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq) + \
(SVE_VQ_BYTES - 1)) / \
SVE_VQ_BYTES * SVE_VQ_BYTES)
#define SVE_PT_SVE_FPCR_OFFSET(vq) \
(SVE_PT_SVE_FPSR_OFFSET(vq) + SVE_PT_SVE_FPSR_SIZE)
inline uint32_t PTraceZRegsOffset(uint16_t vq) {
return SigToPTrace(SigZRegsOffset());
}
inline uint32_t PTraceZRegOffset(uint16_t vq, uint16_t n) {
return SigToPTrace(SigZRegOffset(vq, n));
}
inline uint32_t PTraceZRegsSize(uint16_t vq) {
return PTraceZRegOffset(vq, num_of_zregs) - SigToPTrace(SigRegsOffset());
}
inline uint32_t PTracePRegsOffset(uint16_t vq) {
return SigToPTrace(SigPRegsOffset(vq));
}
inline uint32_t PTracePRegOffset(uint16_t vq, uint16_t n) {
return SigToPTrace(SigPRegOffset(vq, n));
}
inline uint32_t PTracePRegsSize(uint16_t vq) {
return PTracePRegOffset(vq, num_of_pregs) - PTracePRegsOffset(vq);
}
inline uint32_t PTraceFFROffset(uint16_t vq) {
return SigToPTrace(SigFFROffset(vq));
}
inline uint32_t PTraceFPSROffset(uint16_t vq) {
return (PTraceFFROffset(vq) + PTraceFFRSize(vq) + (vq_bytes - 1)) / vq_bytes *
vq_bytes;
}
inline uint32_t PTraceFPCROffset(uint16_t vq) {
return PTraceFPSROffset(vq) + fpsr_size;
}
/*
* Any future extension appended after FPCR must be aligned to the next
* 128-bit boundary.
*/
#define SVE_PT_SVE_SIZE(vq, flags) \
((SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE - SVE_PT_SVE_OFFSET + \
(SVE_VQ_BYTES - 1)) / \
SVE_VQ_BYTES * SVE_VQ_BYTES)
inline uint32_t PTraceSVESize(uint16_t vq, uint16_t flags) {
return (PTraceFPCROffset(vq) + fpcr_size - ptrace_sve_offset + vq_bytes - 1) /
vq_bytes * vq_bytes;
}
#define SVE_PT_SIZE(vq, flags) \
(((flags)&SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE \
? SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, flags) \
: SVE_PT_FPSIMD_OFFSET + SVE_PT_FPSIMD_SIZE(vq, flags))
inline uint32_t PTraceSize(uint16_t vq, uint16_t flags) {
return (flags & ptrace_regs_mask) == ptrace_regs_sve
? ptrace_sve_offset + PTraceSVESize(vq, flags)
: ptrace_fpsimd_offset + PTraceFPSIMDSize(vq, flags);
}
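// Illustrative worked example (not part of this patch): total NT_ARM_SVE
// regset sizes for vq = 4, assuming user_sve_header is the 16-byte kernel
// layout (which also makes SigToPTrace() an identity mapping here). Written
// as if inside the sve namespace and assuming <cassert> is available.
inline void ExamplePTraceSizeForVq4() {
  const uint16_t vq = 4;
  assert(PTraceFPSROffset(vq) == 2208); // end of FFR rounded up to 16 bytes
  assert(PTraceFPCROffset(vq) == 2212); // FPCR directly follows FPSR
  assert(PTraceSize(vq, ptrace_regs_sve) == 2224);   // header + SVE payload
  assert(PTraceSize(vq, ptrace_regs_fpsimd) == 544); // header + user_fpsimd_state
}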
#endif // SVE_SIG_REGS_OFFSET
} // namespace sve
} // namespace lldb_private
#endif // LLDB_SOURCE_PLUGINS_PROCESS_UTILITY_LINUXPTRACEDEFINES_ARM64SVE_H

File: lldb/source/Plugins/Process/elf-core/RegisterContextPOSIXCore_arm64.cpp

@@ -57,20 +57,22 @@ const uint8_t *RegisterContextCorePOSIX_arm64::GetSVEBuffer(uint64_t offset) {
}
void RegisterContextCorePOSIX_arm64::ConfigureRegisterContext() {
if (m_sveregset.GetByteSize() > sizeof(user_sve_header)) {
if (m_sveregset.GetByteSize() > sizeof(sve::user_sve_header)) {
uint64_t sve_header_field_offset = 8;
m_sve_vector_length = m_sveregset.GetU16(&sve_header_field_offset);
sve_header_field_offset = 12;
uint16_t sve_header_flags_field =
m_sveregset.GetU16(&sve_header_field_offset);
if ((sve_header_flags_field & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD)
if ((sve_header_flags_field & sve::ptrace_regs_mask) ==
sve::ptrace_regs_fpsimd)
m_sve_state = SVEState::FPSIMD;
else if ((sve_header_flags_field & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE)
else if ((sve_header_flags_field & sve::ptrace_regs_mask) ==
sve::ptrace_regs_sve)
m_sve_state = SVEState::Full;
if (sve_vl_valid(m_sve_vector_length))
if (sve::vl_valid(m_sve_vector_length))
m_register_info_up->ConfigureVectorRegisterInfos(
sve_vq_from_vl(m_sve_vector_length));
sve::vq_from_vl(m_sve_vector_length));
else {
m_sve_state = SVEState::Disabled;
m_sve_vector_length = 0;
@@ -85,11 +87,11 @@ uint32_t RegisterContextCorePOSIX_arm64::CalculateSVEOffset(
uint32_t sve_reg_offset = LLDB_INVALID_INDEX32;
if (m_sve_state == SVEState::FPSIMD) {
const uint32_t reg = reg_info->kinds[lldb::eRegisterKindLLDB];
sve_reg_offset = SVE_PT_FPSIMD_OFFSET + (reg - GetRegNumSVEZ0()) * 16;
sve_reg_offset = sve::ptrace_fpsimd_offset + (reg - GetRegNumSVEZ0()) * 16;
} else if (m_sve_state == SVEState::Full) {
uint32_t sve_z0_offset = GetGPRSize() + 8;
sve_reg_offset =
SVE_SIG_REGS_OFFSET + reg_info->byte_offset - sve_z0_offset;
sve::SigRegsOffset() + reg_info->byte_offset - sve_z0_offset;
}
return sve_reg_offset;
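// The hard-coded offsets 8 and 12 used in ConfigureRegisterContext() earlier
// in this file assume the kernel's user_sve_header field order; an
// illustrative check of that assumption (not part of this patch, assumes
// <cstddef> for offsetof):
struct example_user_sve_header_shape {
  uint32_t size;     // offset 0: meaningful regset content in bytes
  uint32_t max_size; // offset 4
  uint16_t vl;       // offset 8: vector length, read via GetU16 at offset 8
  uint16_t max_vl;   // offset 10
  uint16_t flags;    // offset 12: SVE vs. FPSIMD state, read at offset 12
  uint16_t reserved; // offset 14
};
static_assert(offsetof(example_user_sve_header_shape, vl) == 8, "vl at 8");
static_assert(offsetof(example_user_sve_header_shape, flags) == 12, "flags at 12");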
@@ -132,15 +134,15 @@ bool RegisterContextCorePOSIX_arm64::ReadRegister(const RegisterInfo *reg_info,
if (reg == GetRegNumFPSR()) {
sve_reg_num = reg;
if (m_sve_state == SVEState::Full)
offset = SVE_PT_SVE_FPSR_OFFSET(sve_vq_from_vl(m_sve_vector_length));
offset = sve::PTraceFPSROffset(sve::vq_from_vl(m_sve_vector_length));
else if (m_sve_state == SVEState::FPSIMD)
offset = SVE_PT_FPSIMD_OFFSET + (32 * 16);
offset = sve::ptrace_fpsimd_offset + (32 * 16);
} else if (reg == GetRegNumFPCR()) {
sve_reg_num = reg;
if (m_sve_state == SVEState::Full)
offset = SVE_PT_SVE_FPCR_OFFSET(sve_vq_from_vl(m_sve_vector_length));
offset = sve::PTraceFPCROffset(sve::vq_from_vl(m_sve_vector_length));
else if (m_sve_state == SVEState::FPSIMD)
offset = SVE_PT_FPSIMD_OFFSET + (32 * 16) + 4;
offset = sve::ptrace_fpsimd_offset + (32 * 16) + 4;
} else {
// Extract SVE Z register value register number for this reg_info
if (reg_info->value_regs &&