From bb62eca626125bc8d1e2e5468266099352a3dedb Mon Sep 17 00:00:00 2001
From: Taylor R Campbell
Date: Fri, 3 Mar 2023 22:53:10 +0000
Subject: [PATCH 1/5] aarch64: Pull final DSB ISH; ISB out of aarch64_tlbi_*.

No functional change intended.

This paves the way to batch up the barriers into pmap_update() in a
subsequent change.
---
 sys/arch/aarch64/aarch64/aarch64_machdep.c   |  4 +++
 sys/arch/aarch64/aarch64/cpufunc_asm_armv8.S | 12 ------------
 sys/arch/aarch64/aarch64/db_interface.c      |  4 +++
 sys/arch/aarch64/aarch64/db_machdep.c        |  2 ++
 sys/arch/aarch64/aarch64/pmap.c              | 34 ++++++++++++++++++++
 sys/arch/aarch64/aarch64/pmap_machdep.c      |  2 ++
 6 files changed, 46 insertions(+), 12 deletions(-)

diff --git a/sys/arch/aarch64/aarch64/aarch64_machdep.c b/sys/arch/aarch64/aarch64/aarch64_machdep.c
index 9ca48f26f292..ab9ebddfe552 100644
--- a/sys/arch/aarch64/aarch64/aarch64_machdep.c
+++ b/sys/arch/aarch64/aarch64/aarch64_machdep.c
@@ -179,6 +179,8 @@ cpu_kernel_vm_init(uint64_t memory_start __unused, uint64_t memory_size __unused
 	isb();
 
 	aarch64_tlbi_all();
+	dsb(ish);
+	isb();
 
 	/*
 	 * at this point, whole kernel image is mapped as "rwx".
@@ -196,6 +198,8 @@ cpu_kernel_vm_init(uint64_t memory_start __unused, uint64_t memory_size __unused
 	    L2_ROUND_BLOCK(kernend), VM_PROT_EXECUTE);
 
 	aarch64_tlbi_all();
+	dsb(ish);
+	isb();
 
 	VPRINTF("%s: kernel phys start %lx end %lx+%lx\n", __func__,
 	    kernstart_phys, kernend_phys, kernend_extra);
diff --git a/sys/arch/aarch64/aarch64/cpufunc_asm_armv8.S b/sys/arch/aarch64/aarch64/cpufunc_asm_armv8.S
index 40efba6dcf46..1b83bf9fd795 100644
--- a/sys/arch/aarch64/aarch64/cpufunc_asm_armv8.S
+++ b/sys/arch/aarch64/aarch64/cpufunc_asm_armv8.S
@@ -212,8 +212,6 @@ ENTRY(aarch64_tlbi_all)
 #else
 	tlbi	vmalle1
 #endif
-	dsb	ish
-	isb
 	ret
 END(aarch64_tlbi_all)
 
@@ -227,8 +225,6 @@ ENTRY(aarch64_tlbi_by_asid)
 #else
 	tlbi	aside1, x8
 #endif
-	dsb	ish
-	isb
 	ret
 END(aarch64_tlbi_by_asid)
 
@@ -242,8 +238,6 @@ ENTRY(aarch64_tlbi_by_va)
 #else
 	tlbi	vaae1, x8
 #endif
-	dsb	ish
-	isb
 	ret
 END(aarch64_tlbi_by_va)
 
@@ -257,8 +251,6 @@ ENTRY(aarch64_tlbi_by_va_ll)
 #else
 	tlbi	vaale1, x8
 #endif
-	dsb	ish
-	isb
 	ret
 END(aarch64_tlbi_by_va_ll)
 
@@ -273,8 +265,6 @@ ENTRY(aarch64_tlbi_by_asid_va)
 #else
 	tlbi	vae1, x8
 #endif
-	dsb	ish
-	isb
 	ret
 END(aarch64_tlbi_by_asid_va)
 
@@ -289,7 +279,5 @@ ENTRY(aarch64_tlbi_by_asid_va_ll)
 #else
 	tlbi	vale1, x8
 #endif
-	dsb	ish
-	isb
 	ret
 END(aarch64_tlbi_by_asid_va_ll)
diff --git a/sys/arch/aarch64/aarch64/db_interface.c b/sys/arch/aarch64/aarch64/db_interface.c
index c8b10d0af8b9..ed4b8d75313c 100644
--- a/sys/arch/aarch64/aarch64/db_interface.c
+++ b/sys/arch/aarch64/aarch64/db_interface.c
@@ -172,6 +172,8 @@ db_write_text(vaddr_t addr, size_t size, const char *data)
 		/* old pte is returned by pmap_kvattr */
 		pte = pmap_kvattr(ptep, VM_PROT_EXECUTE | VM_PROT_READ | VM_PROT_WRITE);
 		aarch64_tlbi_all();
+		dsb(ish);
+		isb();
 
 		s = size;
 		if (size > PAGE_SIZE)
@@ -183,6 +185,8 @@ db_write_text(vaddr_t addr, size_t size, const char *data)
 		/* restore pte */
 		*ptep = pte;
 		aarch64_tlbi_all();
+		dsb(ish);
+		isb();
 
 		addr += s;
 		size -= s;
diff --git a/sys/arch/aarch64/aarch64/db_machdep.c b/sys/arch/aarch64/aarch64/db_machdep.c
index 01da67e48afa..6a9c659f7d76 100644
--- a/sys/arch/aarch64/aarch64/db_machdep.c
+++ b/sys/arch/aarch64/aarch64/db_machdep.c
@@ -527,6 +527,8 @@ db_md_tlbi_cmd(db_expr_t addr, bool have_addr, db_expr_t count,
     const char *modif)
 {
 	aarch64_tlbi_all();
+	dsb(ish);
+	isb();
 }
 
 void
diff --git a/sys/arch/aarch64/aarch64/pmap.c b/sys/arch/aarch64/aarch64/pmap.c
index 57ead954ca1e..bf38bc851e70 100644
--- a/sys/arch/aarch64/aarch64/pmap.c
+++ b/sys/arch/aarch64/aarch64/pmap.c
@@ -180,6 +180,8 @@ PMAP_COUNTER(unwire_failure, "pmap_unwire failure");
 	do {							\
 		atomic_swap_64((ptep), (pte) | LX_BLKPAG_AF);	\
 		AARCH64_TLBI_BY_ASID_VA((asid), (va));		\
+		dsb(ish);					\
+		isb();						\
 		cpu_icache_sync_range((va), PAGE_SIZE);		\
 	} while (0/*CONSTCOND*/)
 
@@ -1020,9 +1022,13 @@ pmap_icache_sync_range(pmap_t pm, vaddr_t sva, vaddr_t eva)
 
 			atomic_swap_64(ptep, pte | LX_BLKPAG_AF);
 			AARCH64_TLBI_BY_ASID_VA(pai->pai_asid, va);
+			dsb(ish);
+			isb();
 			cpu_icache_sync_range(va, len);
 			atomic_swap_64(ptep, pte);
 			AARCH64_TLBI_BY_ASID_VA(pai->pai_asid, va);
+			dsb(ish);
+			isb();
 		}
 	}
 
@@ -1371,6 +1377,8 @@ _pmap_protect_pv(struct pmap_page *pp, struct pv_entry *pv, vm_prot_t prot)
 	struct pmap_asid_info * const pai = PMAP_PAI(pm,
 	    cpu_tlb_info(ci));
 	AARCH64_TLBI_BY_ASID_VA(pai->pai_asid, trunc_page(pv->pv_va));
+	dsb(ish);
+	isb();
 }
 
 void
@@ -1484,14 +1492,20 @@ pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
 				    va);
 				atomic_swap_64(ptep, pte);
 				AARCH64_TLBI_BY_ASID_VA(pai->pai_asid, va);
+				dsb(ish);
+				isb();
 			} else {
 				atomic_swap_64(ptep, pte);
 				AARCH64_TLBI_BY_ASID_VA(pai->pai_asid, va);
+				dsb(ish);
+				isb();
 				cpu_icache_sync_range(va, PAGE_SIZE);
 			}
 		} else {
 			atomic_swap_64(ptep, pte);
 			AARCH64_TLBI_BY_ASID_VA(pai->pai_asid, va);
+			dsb(ish);
+			isb();
 		}
 	}
 
@@ -2100,6 +2114,8 @@ _pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot,
 				    cpu_tlb_info(ci));
 				AARCH64_TLBI_BY_ASID_VA(pai->pai_asid,
 				    va);
+				dsb(ish);
+				isb();
 			}
 			PMAP_COUNT(pv_entry_cannotalloc);
 			if (flags & PMAP_CANFAIL)
@@ -2150,14 +2166,20 @@ _pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot,
 			PTE_ICACHE_SYNC_PAGE(pte, ptep, asid, va);
 			atomic_swap_64(ptep, pte);
 			AARCH64_TLBI_BY_ASID_VA(asid, va);
+			dsb(ish);
+			isb();
 		} else {
 			atomic_swap_64(ptep, pte);
 			AARCH64_TLBI_BY_ASID_VA(asid, va);
+			dsb(ish);
+			isb();
 			cpu_icache_sync_range(va, PAGE_SIZE);
 		}
 	} else {
 		atomic_swap_64(ptep, pte);
 		AARCH64_TLBI_BY_ASID_VA(asid, va);
+		dsb(ish);
+		isb();
 	}
 
 	if (pte & LX_BLKPAG_OS_WIRED) {
@@ -2242,6 +2264,8 @@ pmap_remove_all(struct pmap *pm)
 	pmap_zero_page(pm->pm_l0table_pa);
 
 	aarch64_tlbi_by_asid(PMAP_PAI(pm, cpu_tlb_info(ci))->pai_asid);
+	dsb(ish);
+	isb();
 
 	/* free L1-L3 page table pages, but not L0 */
 	_pmap_free_pdp_all(pm, false);
@@ -2311,6 +2335,8 @@ _pmap_remove(struct pmap *pm, vaddr_t sva, vaddr_t eva, bool kremove,
 			pdpremoved = _pmap_pdp_delref(pm,
 			    AARCH64_KVA_TO_PA(trunc_page((vaddr_t)ptep)), true);
 			AARCH64_TLBI_BY_ASID_VA(pai->pai_asid, va);
+			dsb(ish);
+			isb();
 
 			if (pdpremoved) {
 				/*
@@ -2376,6 +2402,8 @@ pmap_page_remove(struct pmap_page *pp, vm_prot_t prot)
 				    AARCH64_KVA_TO_PA(trunc_page(
 				    (vaddr_t)pv->pv_ptep)), false);
 				AARCH64_TLBI_BY_ASID_VA(pai->pai_asid, va);
+				dsb(ish);
+				isb();
 
 				if ((opte & LX_BLKPAG_OS_WIRED) != 0) {
 					_pmap_adj_wired_count(pm, -1);
@@ -2612,6 +2640,8 @@ pmap_fault_fixup(struct pmap *pm, vaddr_t va, vm_prot_t accessprot, bool user)
 	atomic_swap_64(ptep, pte);
 	struct pmap_asid_info * const pai = PMAP_PAI(pm, cpu_tlb_info(ci));
 	AARCH64_TLBI_BY_ASID_VA(pai->pai_asid, va);
+	dsb(ish);
+	isb();
 
 	fixed = true;
 
@@ -2696,6 +2726,8 @@ pmap_clear_modify(struct vm_page *pg)
 		struct pmap * const pm = pv->pv_pmap;
 		struct pmap_asid_info * const pai = PMAP_PAI(pm, cpu_tlb_info(ci));
 		AARCH64_TLBI_BY_ASID_VA(pai->pai_asid, va);
+		dsb(ish);
+		isb();
 
 		UVMHIST_LOG(pmaphist,
 		    "va=%016llx, ptep=%p, pa=%016lx, RW -> RO",
@@ -2774,6 +2806,8 @@ pmap_clear_reference(struct vm_page *pg)
 		struct pmap * const pm = pv->pv_pmap;
 		struct pmap_asid_info * const pai = PMAP_PAI(pm,
 		    cpu_tlb_info(ci));
 		AARCH64_TLBI_BY_ASID_VA(pai->pai_asid, va);
+		dsb(ish);
+		isb();
 
 		UVMHIST_LOG(pmaphist, "va=%016llx, ptep=%p, pa=%016lx, unse AF",
 		    va, ptep, l3pte_pa(pte), 0);
diff --git a/sys/arch/aarch64/aarch64/pmap_machdep.c b/sys/arch/aarch64/aarch64/pmap_machdep.c
index 82b02332abea..2d342b4c3576 100644
--- a/sys/arch/aarch64/aarch64/pmap_machdep.c
+++ b/sys/arch/aarch64/aarch64/pmap_machdep.c
@@ -423,6 +423,8 @@ pmap_bootstrap(vaddr_t vstart, vaddr_t vend)
 	virtual_end = vend;
 
 	aarch64_tlbi_all();
+	dsb(ish);
+	isb();
 
 	pm->pm_l0_pa = __SHIFTOUT(reg_ttbr1_el1_read(), TTBR_BADDR);
 	pm->pm_pdetab = (pmap_pdetab_t *)AARCH64_PA_TO_KVA(pm->pm_l0_pa);

From 8fa05f37af1988b6cdec38fcdbbd54411577b38b Mon Sep 17 00:00:00 2001
From: Taylor R Campbell
Date: Fri, 3 Mar 2023 22:55:43 +0000
Subject: [PATCH 2/5] aarch64: Make pmap_update do DSB ISH; ISB.

This way we can safely remove the individual barriers from several
pmap(9) functions in order to get the advantage of batching.
---
 sys/arch/aarch64/aarch64/pmap.c | 8 ++++++++
 sys/arch/aarch64/include/pmap.h | 3 ++-
 2 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/sys/arch/aarch64/aarch64/pmap.c b/sys/arch/aarch64/aarch64/pmap.c
index bf38bc851e70..cd1a90e8fbba 100644
--- a/sys/arch/aarch64/aarch64/pmap.c
+++ b/sys/arch/aarch64/aarch64/pmap.c
@@ -185,6 +185,14 @@ PMAP_COUNTER(unwire_failure, "pmap_unwire failure");
 		cpu_icache_sync_range((va), PAGE_SIZE);		\
 	} while (0/*CONSTCOND*/)
 
+void
+pmap_update(struct pmap *pmap)
+{
+
+	dsb(ish);
+	isb();
+}
+
 #define VM_PAGE_TO_PP(pg)	(&(pg)->mdpage.mdpg_pp)
 
 #define L3INDEXMASK	(L3_SIZE * Ln_ENTRIES - 1)
diff --git a/sys/arch/aarch64/include/pmap.h b/sys/arch/aarch64/include/pmap.h
index d2eec38d8500..827672c617e5 100644
--- a/sys/arch/aarch64/include/pmap.h
+++ b/sys/arch/aarch64/include/pmap.h
@@ -397,11 +397,12 @@ pte_valid_p(pt_entry_t pte)
 
 pt_entry_t *kvtopte(vaddr_t);
 
-#define pmap_update(pmap)		((void)0)
 #define pmap_copy(dp,sp,d,l,s)		((void)0)
 #define pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
 #define pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
 
+void	pmap_update(struct pmap *);
+
 struct pmap *
 	pmap_efirt(void);
 void	pmap_activate_efirt(void);

From 7767283f4af45c8d70beb9d56a2956946cd95f17 Mon Sep 17 00:00:00 2001
From: Taylor R Campbell
Date: Fri, 3 Mar 2023 22:58:45 +0000
Subject: [PATCH 3/5] aarch64: Omit needless DSB ISH; ISB in pmap_enter,
 pmap_kenter_pa.

Handled by pmap_update now.
---
 sys/arch/aarch64/aarch64/pmap.c | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/sys/arch/aarch64/aarch64/pmap.c b/sys/arch/aarch64/aarch64/pmap.c
index cd1a90e8fbba..c4085b9ae13b 100644
--- a/sys/arch/aarch64/aarch64/pmap.c
+++ b/sys/arch/aarch64/aarch64/pmap.c
@@ -2122,8 +2122,6 @@ _pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot,
 				    cpu_tlb_info(ci));
 				AARCH64_TLBI_BY_ASID_VA(pai->pai_asid,
 				    va);
-				dsb(ish);
-				isb();
 			}
 			PMAP_COUNT(pv_entry_cannotalloc);
 			if (flags & PMAP_CANFAIL)
@@ -2174,8 +2172,6 @@ _pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot,
 			PTE_ICACHE_SYNC_PAGE(pte, ptep, asid, va);
 			atomic_swap_64(ptep, pte);
 			AARCH64_TLBI_BY_ASID_VA(asid, va);
-			dsb(ish);
-			isb();
 		} else {
 			atomic_swap_64(ptep, pte);
 			AARCH64_TLBI_BY_ASID_VA(asid, va);
@@ -2186,8 +2182,6 @@ _pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot,
 	} else {
 		atomic_swap_64(ptep, pte);
 		AARCH64_TLBI_BY_ASID_VA(asid, va);
-		dsb(ish);
-		isb();
 	}
 
 	if (pte & LX_BLKPAG_OS_WIRED) {

From 0585e2da306a28127ea1d95b0f36ba7cfa74a961 Mon Sep 17 00:00:00 2001
From: Taylor R Campbell
Date: Fri, 3 Mar 2023 23:00:08 +0000
Subject: [PATCH 4/5] aarch64: Omit needless DSB ISH; ISB in pmap_remove,
 pmap_kremove.

Handled by pmap_update now.
---
 sys/arch/aarch64/aarch64/pmap.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/sys/arch/aarch64/aarch64/pmap.c b/sys/arch/aarch64/aarch64/pmap.c
index c4085b9ae13b..7c7625e5b02d 100644
--- a/sys/arch/aarch64/aarch64/pmap.c
+++ b/sys/arch/aarch64/aarch64/pmap.c
@@ -2337,8 +2337,6 @@ _pmap_remove(struct pmap *pm, vaddr_t sva, vaddr_t eva, bool kremove,
 			pdpremoved = _pmap_pdp_delref(pm,
 			    AARCH64_KVA_TO_PA(trunc_page((vaddr_t)ptep)), true);
 			AARCH64_TLBI_BY_ASID_VA(pai->pai_asid, va);
-			dsb(ish);
-			isb();
 
 			if (pdpremoved) {
 				/*

From 0189e77d9da6ad52dae8e42dd08cfd20c941559f Mon Sep 17 00:00:00 2001
From: Taylor R Campbell
Date: Fri, 3 Mar 2023 23:00:57 +0000
Subject: [PATCH 5/5] aarch64: Omit needless DSB ISH; ISB in pmap_protect.

Handled by pmap_update now.
---
 sys/arch/aarch64/aarch64/pmap.c | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/sys/arch/aarch64/aarch64/pmap.c b/sys/arch/aarch64/aarch64/pmap.c
index 7c7625e5b02d..d0a9220b1719 100644
--- a/sys/arch/aarch64/aarch64/pmap.c
+++ b/sys/arch/aarch64/aarch64/pmap.c
@@ -1500,8 +1500,6 @@ pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
 				    va);
 				atomic_swap_64(ptep, pte);
 				AARCH64_TLBI_BY_ASID_VA(pai->pai_asid, va);
-				dsb(ish);
-				isb();
 			} else {
 				atomic_swap_64(ptep, pte);
 				AARCH64_TLBI_BY_ASID_VA(pai->pai_asid, va);
@@ -1512,8 +1510,6 @@ pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
 		} else {
 			atomic_swap_64(ptep, pte);
 			AARCH64_TLBI_BY_ASID_VA(pai->pai_asid, va);
-			dsb(ish);
-			isb();
 		}
 	}
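
The caller-side pattern this series enables looks roughly like the
following sketch (illustrative only, not part of the patches; the
function name unmap_and_flush is invented for the example, while
pmap_remove() and pmap_update() are the standard pmap(9) entry
points).  After these changes, each pmap operation issues its TLBI
instructions without trailing barriers, and a single pmap_update()
call completes the whole batch with one DSB ISH; ISB:

	#include <sys/param.h>
	#include <uvm/uvm_extern.h>

	/*
	 * Hypothetical caller: unmap a range, then publish all of the
	 * TLB invalidations at once.  With this series applied,
	 * pmap_remove() no longer executes DSB/ISB per page; the
	 * single pmap_update() below pays that cost once.
	 */
	static void
	unmap_and_flush(struct pmap *pm, vaddr_t sva, vaddr_t eva)
	{
		pmap_remove(pm, sva, eva);	/* TLBIs, no barriers */
		pmap_update(pm);		/* one DSB ISH; ISB */
	}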