include/hw/core/cpu: Introduce cpu_tlb_fast

Encapsulate access to cpu->neg.tlb.f[] in a function.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
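The conversion below is mechanical: every open-coded &cpu->neg.tlb.f[...] access is routed through the new inline accessor. A minimal sketch of the pattern (the call-site wrapper is only illustrative; the accessor itself is taken from the include/hw/core/cpu.h hunk further down):

    /* New accessor, added to include/hw/core/cpu.h by this commit. */
    static inline CPUTLBDescFast *cpu_tlb_fast(CPUState *cpu, int mmu_idx)
    {
        return &cpu->neg.tlb.f[mmu_idx];
    }

    /* Hypothetical call site showing the shape of the conversion. */
    static void example_call_site(CPUState *cpu, int mmu_idx)
    {
        /* before: CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx]; */
        CPUTLBDescFast *fast = cpu_tlb_fast(cpu, mmu_idx);
        (void)fast;
    }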
@@ -129,7 +129,7 @@ static inline uint64_t tlb_addr_write(const CPUTLBEntry *entry)
 static inline uintptr_t tlb_index(CPUState *cpu, uintptr_t mmu_idx,
                                   vaddr addr)
 {
-    uintptr_t size_mask = cpu->neg.tlb.f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS;
+    uintptr_t size_mask = cpu_tlb_fast(cpu, mmu_idx)->mask >> CPU_TLB_ENTRY_BITS;
 
     return (addr >> TARGET_PAGE_BITS) & size_mask;
 }
@@ -138,7 +138,7 @@ static inline uintptr_t tlb_index(CPUState *cpu, uintptr_t mmu_idx,
 static inline CPUTLBEntry *tlb_entry(CPUState *cpu, uintptr_t mmu_idx,
                                      vaddr addr)
 {
-    return &cpu->neg.tlb.f[mmu_idx].table[tlb_index(cpu, mmu_idx, addr)];
+    return &cpu_tlb_fast(cpu, mmu_idx)->table[tlb_index(cpu, mmu_idx, addr)];
 }
 
 static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
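Why mask >> CPU_TLB_ENTRY_BITS is the right index mask: the descriptor stores the table size pre-shifted, as (n_entries - 1) << CPU_TLB_ENTRY_BITS, so the TCG fast path can use it as a byte-offset mask directly; shifting it back down recovers n_entries - 1 for C-level indexing. A self-contained sketch with illustrative constants (the values are assumptions for a 64-bit host with 4 KiB target pages, not taken from this patch):

    #include <assert.h>
    #include <stdint.h>

    #define CPU_TLB_ENTRY_BITS 5    /* assumed log2(sizeof(CPUTLBEntry)) */
    #define TARGET_PAGE_BITS   12   /* assumed 4 KiB target pages */

    int main(void)
    {
        uintptr_t n_entries = 256;  /* table sizes are powers of two */
        /* As stored in CPUTLBDescFast.mask: */
        uintptr_t mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
        uintptr_t addr = 0x12345678;

        /* What tlb_index() computes via cpu_tlb_fast(cpu, mmu_idx)->mask: */
        uintptr_t size_mask = mask >> CPU_TLB_ENTRY_BITS;
        uintptr_t index = (addr >> TARGET_PAGE_BITS) & size_mask;

        assert(size_mask == n_entries - 1);
        assert(index < n_entries);
        return 0;
    }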
@@ -292,7 +292,7 @@ static void tlb_flush_one_mmuidx_locked(CPUState *cpu, int mmu_idx,
                                         int64_t now)
 {
     CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx];
-    CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx];
+    CPUTLBDescFast *fast = cpu_tlb_fast(cpu, mmu_idx);
 
     tlb_mmu_resize_locked(desc, fast, now);
     tlb_mmu_flush_locked(desc, fast);
@@ -331,7 +331,7 @@ void tlb_init(CPUState *cpu)
     cpu->neg.tlb.c.dirty = 0;
 
     for (i = 0; i < NB_MMU_MODES; i++) {
-        tlb_mmu_init(&cpu->neg.tlb.d[i], &cpu->neg.tlb.f[i], now);
+        tlb_mmu_init(&cpu->neg.tlb.d[i], cpu_tlb_fast(cpu, i), now);
     }
 }
 
@@ -342,7 +342,7 @@ void tlb_destroy(CPUState *cpu)
     qemu_spin_destroy(&cpu->neg.tlb.c.lock);
     for (i = 0; i < NB_MMU_MODES; i++) {
         CPUTLBDesc *desc = &cpu->neg.tlb.d[i];
-        CPUTLBDescFast *fast = &cpu->neg.tlb.f[i];
+        CPUTLBDescFast *fast = cpu_tlb_fast(cpu, i);
 
         g_free(fast->table);
         g_free(desc->fulltlb);
@@ -667,7 +667,7 @@ static void tlb_flush_range_locked(CPUState *cpu, int midx,
                                    unsigned bits)
 {
     CPUTLBDesc *d = &cpu->neg.tlb.d[midx];
-    CPUTLBDescFast *f = &cpu->neg.tlb.f[midx];
+    CPUTLBDescFast *f = cpu_tlb_fast(cpu, midx);
     vaddr mask = MAKE_64BIT_MASK(0, bits);
 
     /*
@@ -923,7 +923,7 @@ void tlb_reset_dirty(CPUState *cpu, uintptr_t start, uintptr_t length)
     qemu_spin_lock(&cpu->neg.tlb.c.lock);
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
         CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx];
-        CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx];
+        CPUTLBDescFast *fast = cpu_tlb_fast(cpu, mmu_idx);
         unsigned int n = tlb_n_entries(fast);
         unsigned int i;
 
@@ -1316,7 +1316,7 @@ static bool victim_tlb_hit(CPUState *cpu, size_t mmu_idx, size_t index,
 
         if (cmp == page) {
             /* Found entry in victim tlb, swap tlb and iotlb.  */
-            CPUTLBEntry tmptlb, *tlb = &cpu->neg.tlb.f[mmu_idx].table[index];
+            CPUTLBEntry tmptlb, *tlb = &cpu_tlb_fast(cpu, mmu_idx)->table[index];
 
             qemu_spin_lock(&cpu->neg.tlb.c.lock);
             copy_tlb_helper_locked(&tmptlb, tlb);

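The victim_tlb_hit() hunk is truncated above; for orientation, the code that follows the first copy_tlb_helper_locked() call performs a three-way swap of the hot entry and the victim entry while cpu->neg.tlb.c.lock is held. A schematic of that pattern (from memory, not verbatim; vtlb stands for the victim-table slot):

    /* Swap the main-TLB slot (*tlb) with the victim slot (*vtlb),
     * using tmptlb as scratch, all under the TLB lock. */
    copy_tlb_helper_locked(&tmptlb, tlb);    /* save hot entry        */
    copy_tlb_helper_locked(tlb, vtlb);       /* victim -> main table  */
    copy_tlb_helper_locked(vtlb, &tmptlb);   /* old hot -> victim     */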
@@ -602,6 +602,13 @@ static inline CPUArchState *cpu_env(CPUState *cpu)
     return (CPUArchState *)(cpu + 1);
 }
 
+#ifdef CONFIG_TCG
+static inline CPUTLBDescFast *cpu_tlb_fast(CPUState *cpu, int mmu_idx)
+{
+    return &cpu->neg.tlb.f[mmu_idx];
+}
+#endif
+
 typedef QTAILQ_HEAD(CPUTailQ, CPUState) CPUTailQ;
 extern CPUTailQ cpus_queue;
 
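The accessor lives under CONFIG_TCG because cpu->neg.tlb exists only for TCG builds. For context, the object it returns is a two-word descriptor whose layout the backend hunks below check at build time; an illustrative sketch of that shape (not part of this patch; see QEMU's TLB headers for the authoritative definition):

    #include <stdint.h>

    typedef struct CPUTLBEntry CPUTLBEntry;   /* opaque here */

    /* Layout consistent with the QEMU_BUILD_BUG_ON() checks below:
     * 'mask' at offset 0, 'table' immediately after it. */
    typedef struct {
        uintptr_t mask;       /* (n_entries - 1) << CPU_TLB_ENTRY_BITS */
        CPUTLBEntry *table;   /* the TLB entry array itself */
    } CPUTLBDescFast_sketch;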
@@ -1668,7 +1668,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
         ldst->oi = oi;
         ldst->addr_reg = addr_reg;
 
-        /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {tmp0,tmp1}. */
+        /* Load CPUTLBDescFast.{mask,table} into {tmp0,tmp1}. */
         QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
         QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 8);
         tcg_out_insn(s, 3314, LDP, TCG_REG_TMP0, TCG_REG_TMP1, TCG_AREG0,

@@ -1421,7 +1421,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
         ldst->oi = oi;
         ldst->addr_reg = addr;
 
-        /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {r0,r1}.  */
+        /* Load CPUTLBDescFast.{mask,table} into {r0,r1}.  */
         QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
         QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4);
         tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);

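Both backend hunks above rely on that layout: aarch64 fetches mask and table with a single LDP (offsets 0 and 8), and 32-bit Arm with ldrd (offsets 0 and 4). A simplified C model of what the emitted fast path does with that pair, showing why the mask is stored pre-shifted (the constants and the model structs are assumptions for illustration, not the emitted code):

    #include <stdint.h>

    #define CPU_TLB_ENTRY_BITS 5    /* assumed log2(sizeof(CPUTLBEntry)) */
    #define TARGET_PAGE_BITS   12   /* assumed 4 KiB target pages */

    typedef struct {
        uint64_t addr_read, addr_write, addr_code;
        uintptr_t addend;
    } EntryModel;

    typedef struct {
        uintptr_t mask;
        EntryModel *table;
    } FastModel;

    /* Shifting by (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS) scales the page
     * index to entry size; AND-ing with 'mask' both clamps it to the table
     * and clears the low bits, yielding a byte offset into 'table'. */
    static inline EntryModel *fast_lookup_model(const FastModel *f, uint64_t addr)
    {
        uintptr_t ofs = (addr >> (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS)) & f->mask;
        return (EntryModel *)((uintptr_t)f->table + ofs);
    }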