diff --git a/qemu/aarch64.h b/qemu/aarch64.h
index ca7168cd..961d3a17 100644
--- a/qemu/aarch64.h
+++ b/qemu/aarch64.h
@@ -4402,6 +4402,7 @@
 #define mls_op mls_op_aarch64
 #define new_tmp_a64 new_tmp_a64_aarch64
 #define new_tmp_a64_zero new_tmp_a64_zero_aarch64
+#define pmsav8_mpu_lookup pmsav8_mpu_lookup_aarch64
 #define pmu_op_start pmu_op_start_aarch64
 #define pmu_op_finish pmu_op_finish_aarch64
 #define pmu_pre_el_change pmu_pre_el_change_aarch64
@@ -4423,6 +4424,7 @@
 #define uqadd_op uqadd_op_aarch64
 #define uqsub_op uqsub_op_aarch64
 #define usra_op usra_op_aarch64
+#define v8m_security_lookup v8m_security_lookup_aarch64
 #define vfp_expand_imm vfp_expand_imm_aarch64
 #define write_fp_dreg write_fp_dreg_aarch64
 #endif
diff --git a/qemu/aarch64eb.h b/qemu/aarch64eb.h
index daf1f02d..8d793be8 100644
--- a/qemu/aarch64eb.h
+++ b/qemu/aarch64eb.h
@@ -4402,6 +4402,7 @@
 #define mls_op mls_op_aarch64eb
 #define new_tmp_a64 new_tmp_a64_aarch64eb
 #define new_tmp_a64_zero new_tmp_a64_zero_aarch64eb
+#define pmsav8_mpu_lookup pmsav8_mpu_lookup_aarch64eb
 #define pmu_op_start pmu_op_start_aarch64eb
 #define pmu_op_finish pmu_op_finish_aarch64eb
 #define pmu_pre_el_change pmu_pre_el_change_aarch64eb
@@ -4423,6 +4424,7 @@
 #define uqadd_op uqadd_op_aarch64eb
 #define uqsub_op uqsub_op_aarch64eb
 #define usra_op usra_op_aarch64eb
+#define v8m_security_lookup v8m_security_lookup_aarch64eb
 #define vfp_expand_imm vfp_expand_imm_aarch64eb
 #define write_fp_dreg write_fp_dreg_aarch64eb
 #endif
diff --git a/qemu/arm.h b/qemu/arm.h
index d31ea9c8..42454f18 100644
--- a/qemu/arm.h
+++ b/qemu/arm.h
@@ -3405,6 +3405,7 @@
 #define pmu_init pmu_init_arm
 #define mla_op mla_op_arm
 #define mls_op mls_op_arm
+#define pmsav8_mpu_lookup pmsav8_mpu_lookup_arm
 #define pmu_op_start pmu_op_start_arm
 #define pmu_op_finish pmu_op_finish_arm
 #define pmu_pre_el_change pmu_pre_el_change_arm
@@ -3421,5 +3422,6 @@
 #define uqadd_op uqadd_op_arm
 #define uqsub_op uqsub_op_arm
 #define usra_op usra_op_arm
+#define v8m_security_lookup v8m_security_lookup_arm
 #define vfp_expand_imm vfp_expand_imm_arm
 #endif
diff --git a/qemu/armeb.h b/qemu/armeb.h
index da1e552d..44d9fc76 100644
--- a/qemu/armeb.h
+++ b/qemu/armeb.h
@@ -3405,6 +3405,7 @@
 #define pmu_init pmu_init_armeb
 #define mla_op mla_op_armeb
 #define mls_op mls_op_armeb
+#define pmsav8_mpu_lookup pmsav8_mpu_lookup_armeb
 #define pmu_op_start pmu_op_start_armeb
 #define pmu_op_finish pmu_op_finish_armeb
 #define pmu_pre_el_change pmu_pre_el_change_armeb
@@ -3421,5 +3422,6 @@
 #define uqadd_op uqadd_op_armeb
 #define uqsub_op uqsub_op_armeb
 #define usra_op usra_op_armeb
+#define v8m_security_lookup v8m_security_lookup_armeb
 #define vfp_expand_imm vfp_expand_imm_armeb
 #endif
diff --git a/qemu/header_gen.py b/qemu/header_gen.py
index df8e026d..9f070e58 100644
--- a/qemu/header_gen.py
+++ b/qemu/header_gen.py
@@ -3414,6 +3414,7 @@ arm_symbols = (
     'pmu_init',
     'mla_op',
     'mls_op',
+    'pmsav8_mpu_lookup',
     'pmu_op_start',
     'pmu_op_finish',
     'pmu_pre_el_change',
@@ -3430,6 +3431,7 @@ arm_symbols = (
     'uqadd_op',
     'uqsub_op',
     'usra_op',
+    'v8m_security_lookup',
     'vfp_expand_imm',
 )
 
@@ -4457,6 +4459,7 @@ aarch64_symbols = (
     'mls_op',
     'new_tmp_a64',
     'new_tmp_a64_zero',
+    'pmsav8_mpu_lookup',
     'pmu_op_start',
     'pmu_op_finish',
     'pmu_pre_el_change',
@@ -4478,6 +4481,7 @@ aarch64_symbols = (
     'uqadd_op',
     'uqsub_op',
     'usra_op',
+    'v8m_security_lookup',
     'vfp_expand_imm',
     'write_fp_dreg',
 )
diff --git a/qemu/target/arm/helper.c b/qemu/target/arm/helper.c
index 99d273e5..c3b9652f 100644
--- a/qemu/target/arm/helper.c
+++ b/qemu/target/arm/helper.c
@@ -30,21 +30,6 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                                hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                                target_ulong *page_size_ptr,
                                ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
-
-/* Security attributes for an address, as returned by v8m_security_lookup. */
-typedef struct V8M_SAttributes {
-    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
-    bool ns;
-    bool nsc;
-    uint8_t sregion;
-    bool srvalid;
-    uint8_t iregion;
-    bool irvalid;
-} V8M_SAttributes;
-
-static void v8m_security_lookup(CPUARMState *env, uint32_t address,
-                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                                V8M_SAttributes *sattrs);
 #endif
 
 static void switch_mode(CPUARMState *env, int mode);
@@ -7478,25 +7463,6 @@ uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
     return target_el;
 }
 
-/*
- * Return true if the v7M CPACR permits access to the FPU for the specified
- * security state and privilege level.
- */
-static bool v7m_cpacr_pass(CPUARMState *env, bool is_secure, bool is_priv)
-{
-    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
-    case 0:
-    case 2: /* UNPREDICTABLE: we treat like 0 */
-        return false;
-    case 1:
-        return is_priv;
-    case 3:
-        return true;
-    default:
-        g_assert_not_reached();
-    }
-}
-
 /*
  * What kind of stack write are we doing? This affects how exceptions
  * generated during the stacking are treated.
@@ -11971,9 +11937,9 @@ static bool v8m_is_sau_exempt(CPUARMState *env,
         (address >= 0xe00ff000 && address <= 0xe00fffff);
 }
 
-static void v8m_security_lookup(CPUARMState *env, uint32_t address,
-                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                                V8M_SAttributes *sattrs)
+void v8m_security_lookup(CPUARMState *env, uint32_t address,
+                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                         V8M_SAttributes *sattrs)
 {
     /*
      * Look up the security attributes for this address. Compare the
@@ -12058,11 +12024,11 @@ static void v8m_security_lookup(CPUARMState *env, uint32_t address,
     }
 }
 
-static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
-                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                              hwaddr *phys_ptr, MemTxAttrs *txattrs,
-                              int *prot, bool *is_subpage,
-                              ARMMMUFaultInfo *fi, uint32_t *mregion)
+bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
+                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
+                       int *prot, bool *is_subpage,
+                       ARMMMUFaultInfo *fi, uint32_t *mregion)
 {
     /*
      * Perform a PMSAv8 MPU lookup (without also doing the SAU check
diff --git a/qemu/target/arm/internals.h b/qemu/target/arm/internals.h
index c7b4270f..d17fdeb2 100644
--- a/qemu/target/arm/internals.h
+++ b/qemu/target/arm/internals.h
@@ -894,6 +894,27 @@ static inline uint32_t v7m_sp_limit(CPUARMState *env)
     }
 }
 
+/**
+ * v7m_cpacr_pass:
+ * Return true if the v7M CPACR permits access to the FPU for the specified
+ * security state and privilege level.
+ */
+static inline bool v7m_cpacr_pass(CPUARMState *env,
+                                  bool is_secure, bool is_priv)
+{
+    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
+    case 0:
+    case 2: /* UNPREDICTABLE: we treat like 0 */
+        return false;
+    case 1:
+        return is_priv;
+    case 3:
+        return true;
+    default:
+        g_assert_not_reached();
+    }
+}
+
 /**
  * aarch32_mode_name(): Return name of the AArch32 CPU mode
  * @psr: Program Status Register indicating CPU mode
@@ -990,6 +1011,27 @@ static inline int exception_target_el(CPUARMState *env)
 
 #ifndef CONFIG_USER_ONLY
 
+/* Security attributes for an address, as returned by v8m_security_lookup. */
+typedef struct V8M_SAttributes {
+    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
+    bool ns;
+    bool nsc;
+    uint8_t sregion;
+    bool srvalid;
+    uint8_t iregion;
+    bool irvalid;
+} V8M_SAttributes;
+
+void v8m_security_lookup(CPUARMState *env, uint32_t address,
+                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                         V8M_SAttributes *sattrs);
+
+bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
+                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
+                       int *prot, bool *is_subpage,
+                       ARMMMUFaultInfo *fi, uint32_t *mregion);
+
 /* Cacheability and shareability attributes for a memory access */
 typedef struct ARMCacheAttrs {
     unsigned int attrs:8; /* as in the MAIR register encoding */