Index: sys/arch/arm/arm/ast.c
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/arm/ast.c,v
retrieving revision 1.24
diff -u -p -r1.24 ast.c
--- sys/arch/arm/arm/ast.c	13 Aug 2014 21:41:32 -0000	1.24
+++ sys/arch/arm/arm/ast.c	24 Oct 2014 21:42:56 -0000
@@ -71,9 +71,6 @@ void ast(struct trapframe *);
 void
 userret(struct lwp *l)
 {
-	/* Invoke MI userret code */
-	mi_userret(l);
-
 #if defined(__PROG32) && defined(ARM_MMU_EXTENDED)
 	/*
 	 * If our ASID got released, access via TTBR0 will have been disabled.
@@ -83,8 +80,12 @@ userret(struct lwp *l)
 	if (armreg_ttbcr_read() & TTBCR_S_PD0) {
 		pmap_activate(l);
 	}
+	KASSERT(!(armreg_ttbcr_read() & TTBCR_S_PD0));
 #endif
 
+	/* Invoke MI userret code */
+	mi_userret(l);
+
 #if defined(__PROG32) && defined(DIAGNOSTIC)
 	KASSERT(VALID_R15_PSR(lwp_trapframe(l)->tf_pc,
 	    lwp_trapframe(l)->tf_spsr));
Index: sys/arch/arm/arm/cpufunc_asm_armv7.S
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/arm/cpufunc_asm_armv7.S,v
retrieving revision 1.18
diff -u -p -r1.18 cpufunc_asm_armv7.S
--- sys/arch/arm/arm/cpufunc_asm_armv7.S	31 Jul 2014 06:26:06 -0000	1.18
+++ sys/arch/arm/arm/cpufunc_asm_armv7.S	24 Oct 2014 21:43:01 -0000
@@ -380,6 +380,7 @@ ENTRY_NP(armv7_dcache_inv_all)
 	b	1b
 
 .Lnext_level_inv:
+	dsb
 	mrc	p15, 1, r0, c0, c0, 1	@ read CLIDR
 	ubfx	ip, r0, #24, #3		@ narrow to LoC
 	add	r3, r3, #2		@ go to next level
@@ -440,6 +441,7 @@ ENTRY_NP(armv7_dcache_wbinv_all)
 	b	1b
 
 .Lnext_level_wbinv:
+	dsb
 	mrc	p15, 1, r0, c0, c0, 1	@ read CLIDR
 	ubfx	ip, r0, #24, #3		@ narrow to LoC
 	add	r3, r3, #2		@ go to next level
Index: sys/arch/arm/arm32/arm32_tlb.c
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/arm32/arm32_tlb.c,v
retrieving revision 1.4
diff -u -p -r1.4 arm32_tlb.c
--- sys/arch/arm/arm32/arm32_tlb.c	14 Oct 2014 20:35:03 -0000	1.4
+++ sys/arch/arm/arm32/arm32_tlb.c	24 Oct 2014 21:43:01 -0000
@@ -29,6 +29,8 @@
 #include
 __KERNEL_RCSID(1, "$NetBSD: arm32_tlb.c,v 1.4 2014/10/14 20:35:03 matt Exp $");
 
+#include "opt_multiprocessor.h"
+
 #include
 #include
 
@@ -48,8 +50,9 @@ void
 tlb_set_asid(tlb_asid_t asid)
 {
 	arm_dsb();
-	if (asid == 0) {
+	if (asid == KERNEL_PID) {
 		armreg_ttbcr_write(armreg_ttbcr_read() | TTBCR_S_PD0);
+		arm_isb();
 	}
 	armreg_contextidr_write(asid);
 	arm_isb();
@@ -118,6 +121,7 @@ tlb_invalidate_addr(vaddr_t va, tlb_asid
 #endif
 	//armreg_tlbiall_write(asid);
 	}
+	arm_dsb();
 	arm_isb();
 }
Index: sys/arch/arm/arm32/db_interface.c
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/arm32/db_interface.c,v
retrieving revision 1.52
diff -u -p -r1.52 db_interface.c
--- sys/arch/arm/arm32/db_interface.c	30 Mar 2014 08:00:34 -0000	1.52
+++ sys/arch/arm/arm32/db_interface.c	24 Oct 2014 21:43:03 -0000
@@ -39,6 +39,7 @@ __KERNEL_RCSID(0, "$NetBSD: db_interface
 
 #include "opt_ddb.h"
 #include "opt_kgdb.h"
+#include "opt_multiprocessor.h"
 
 #include
 #include
 
@@ -298,7 +299,7 @@ db_write_text(vaddr_t addr, size_t size,
 			    (vaddr_t) dst);
 			return;
 		}
-		cpu_tlb_flushD_SE(pgva);
+		cpu_tlb_flushD_SE(pgva, KERNEL_PID);
 		cpu_cpwait();
 
 		if (limit > size)
@@ -326,7 +327,7 @@ db_write_text(vaddr_t addr, size_t size,
 			PTE_SYNC(pte);
 			break;
 		}
-		cpu_tlb_flushD_SE(pgva);
+		cpu_tlb_flushD_SE(pgva, KERNEL_PID);
 		cpu_cpwait();
 
 	} while (size != 0);
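Aside (not part of the patch): the db_interface.c hunks above and the pmap.c hunks below all grow an ASID argument on the single-entry TLB flushes. With the extended MMU, the ARMv7 TLBIMVA operation takes VA[31:12] together with ASID[7:0], so the flusher must say which address space an entry belongs to; KERNEL_PID tags kernel mappings. A minimal sketch of what the two-argument flush boils down to, using the armreg accessors and barriers the patch already relies on -- the helper name is illustrative only:

static inline void
example_tlb_flush_page(vaddr_t va, tlb_asid_t asid)
{
	arm_dsb();	/* order earlier PTE writes before the invalidate */
	armreg_tlbimva_write((va & ~PAGE_MASK) | asid);
	arm_dsb();	/* wait for the invalidate to complete */
	arm_isb();	/* resynchronize instruction fetch */
}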
Index: sys/arch/arm/arm32/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/arm32/pmap.c,v
retrieving revision 1.304
diff -u -p -r1.304 pmap.c
--- sys/arch/arm/arm32/pmap.c	20 Oct 2014 07:13:27 -0000	1.304
+++ sys/arch/arm/arm32/pmap.c	24 Oct 2014 21:43:12 -0000
@@ -247,7 +247,6 @@ int pmap_debug_level = 0;
 #define	PDB_KREMOVE	0x40000
 #define	PDB_EXEC	0x80000
 
-int debugmap = 1;
 int pmapdebug = 0;
 
 #define	NPDEBUG(_lev_,_stat_) \
 	if (pmapdebug & (_lev_)) \
@@ -666,8 +665,8 @@ __CTASSERT(L2_BUCKET_XFRAME == ~(L2_BUCK
 	    PR_NOWAIT, (pap)))
 
 /*
- * We try to map the page tables write-through, if possible. However, not
- * all CPUs have a write-through cache mode, so on those we have to sync
+ * We try to map the page tables write-through (wb?), if possible. However, not
+ * all CPUs have a write-through (wb?) cache mode, so on those we have to sync
  * the cache when we frob page tables.
  *
  * We try to evaluate this at compile time, if possible. However, it's
@@ -1551,7 +1550,7 @@ pmap_alloc_l2_bucket(pmap_t pm, vaddr_t
 		pd_entry_t * const pdep = pmap_l1_kva(pm) + l1slot;
 		pd_entry_t npde = L1_C_PROTO | l2b->l2b_pa
 		    | L1_C_DOM(pmap_domain(pm));
-		KASSERT(*pdep == 0);
+		KASSERTMSG(*pdep == 0, "va %lx pdep %p *pdep %x npde %x\n", va, pdep, *pdep, npde);
 		l1pte_setone(pdep, npde);
 		PTE_SYNC(pdep);
 #endif
@@ -2497,11 +2496,14 @@ pmap_clearbit(struct vm_page_md *md, pad
 		}
 
 		if (npte != opte) {
-			l2pte_set(ptep, npte, opte);
+			l2pte_reset(ptep);
 			PTE_SYNC(ptep);
 
 			/* Flush the TLB entry if a current pmap. */
 			pmap_tlb_flush_SE(pm, va, oflags);
+
+			l2pte_set(ptep, npte, 0);
+			PTE_SYNC(ptep);
 		}
 
 		pmap_release_pmap_lock(pm);
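Aside (not part of the patch): the pmap_clearbit() hunk above is the first instance of the break-before-make discipline that the remaining pmap.c hunks repeat. Rewriting a live PTE in place lets an ARMv7 MMU hold TLB entries for both the old and the new translation at once, so the sequence becomes: invalidate the PTE, sync it out, flush the TLB entry, and only then install the replacement. A hypothetical helper showing the shape, built purely from calls the patch uses:

static inline void
example_pte_replace(pmap_t pm, vaddr_t va, pt_entry_t *ptep,
    pt_entry_t npte, u_int flags)
{
	l2pte_reset(ptep);		/* break: zap the old PTE */
	PTE_SYNC(ptep);			/* make that visible to the table walker */
	pmap_tlb_flush_SE(pm, va, flags); /* discard any cached translation */

	l2pte_set(ptep, npte, 0);	/* make: write the new PTE */
	PTE_SYNC(ptep);
}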
@@ -2656,6 +2658,10 @@ pmap_syncicache_page(struct vm_page_md *
 	for (size_t i = 0, j = 0; i < way_size;
 	    i += PAGE_SIZE, j += PAGE_SIZE / L2_S_SIZE) {
+		/* XXXNH I did this */
+		l2pte_reset(ptep + j);
+		PTE_SYNC(ptep + j);
+
 		pmap_tlb_flush_SE(kpm, dstp + i, PVF_REF | PVF_EXEC);
 		/*
 		 * Set up a PTE with to flush these cache lines.
@@ -2884,10 +2890,6 @@ pmap_page_remove(struct vm_page_md *md,
 		pmap_release_page_lock(md);
 		pmap_acquire_pmap_lock(pm);
 
-#ifdef ARM_MMU_EXTENDED
-		pmap_tlb_invalidate_addr(pm, pv->pv_va);
-#endif
-
 		l2b = pmap_get_l2_bucket(pm, pv->pv_va);
 		KASSERTMSG(l2b != NULL, "%#lx", pv->pv_va);
@@ -2910,6 +2912,12 @@ pmap_page_remove(struct vm_page_md *md,
 		l2pte_reset(ptep);
 		PTE_SYNC_CURRENT(pm, ptep);
 		pmap_free_l2_bucket(pm, l2b, PAGE_SIZE / L2_S_SIZE);
+
+#ifdef ARM_MMU_EXTENDED
+		/* XXXNH pmap_tlb_flush_SE()? */
+		pmap_tlb_invalidate_addr(pm, pv->pv_va);
+#endif
+
 		pmap_release_pmap_lock(pm);
 
 		pool_put(&pmap_pv_pool, pv);
@@ -3279,6 +3287,8 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_
 #ifdef ARM_MMU_EXTENDED
 		npte |= L2_XS_nG;	/* user pages are not global */
 #endif
+	} else {
+		KASSERT(!(npte & L2_XS_nG));
 	}
 
 	/*
@@ -3309,7 +3319,12 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_
 	 */
 	if (npte != opte) {
-		l2pte_set(ptep, npte, opte);
+		/* XXXNH I did this */
+		l2pte_reset(ptep);
+		PTE_SYNC(ptep);
+		pmap_tlb_flush_SE(pm, va, oflags);
+
+		l2pte_set(ptep, npte, 0);
 		PTE_SYNC(ptep);
 #ifndef ARM_MMU_EXTENDED
 		bool is_cached = pmap_is_cached(pm);
@@ -3330,6 +3345,7 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_
 			pd_entry_t pde = L1_C_PROTO | l2b->l2b_pa
 			    | L1_C_DOM(pmap_domain(pm));
 			if (*pdep != pde) {
+				/* XXXNH KASSERT *pde == 0 */
 				l1pte_setone(pdep, pde);
 				PTE_SYNC(pdep);
 			}
@@ -3337,8 +3353,6 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_
 		}
 	}
 #endif /* !ARM_MMU_EXTENDED */
 
-	pmap_tlb_flush_SE(pm, va, oflags);
-
 #ifndef ARM_MMU_EXTENDED
 	UVMHIST_LOG(maphist, " is_cached %d cs 0x%08x\n",
 	    is_cached, pm->pm_cstate.cs_all, 0, 0);
@@ -3482,6 +3496,8 @@ pmap_remove(pmap_t pm, vaddr_t sva, vadd
 			 */
 			l2pte_reset(ptep);
 			PTE_SYNC_CURRENT(pm, ptep);
+			/* XXXNH I did this */
+//			pmap_tlb_flush_SE(pm, sva, flags);
 			continue;
 		}
@@ -3489,7 +3505,7 @@ pmap_remove(pmap_t pm, vaddr_t sva, vadd
 		if (pm == pmap_kernel()) {
 			l2pte_reset(ptep);
 			PTE_SYNC(ptep);
-			pmap_tlb_flush_SE(pm, sva, flags);
+			pmap_tlb_flush_SE(pm, sva, flags);
 			continue;
 		}
 #endif
@@ -3508,6 +3524,7 @@ pmap_remove(pmap_t pm, vaddr_t sva, vadd
 				 * Roll back the previous PTE list,
 				 * and zero out the current PTE.
 				 */
+				/* XXXNH pmap_tlb_flush_SE()? */
 				for (cnt = 0; cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) {
 					l2pte_reset(cleanlist[cnt].ptep);
@@ -3532,6 +3549,9 @@ pmap_remove(pmap_t pm, vaddr_t sva, vadd
 		if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
 			total += cleanlist_idx;
 			for (cnt = 0; cnt < cleanlist_idx; cnt++) {
+				/* XXXNH I did this */
+				l2pte_reset(cleanlist[cnt].ptep);
+				PTE_SYNC_CURRENT(pm, cleanlist[cnt].ptep);
 #ifdef ARM_MMU_EXTENDED
 				vaddr_t clva = cleanlist[cnt].va;
 				pmap_tlb_flush_SE(pm, clva, PVF_REF);
@@ -3548,8 +3568,6 @@ pmap_remove(pmap_t pm, vaddr_t sva, vadd
 					    PVF_REF | flags);
 				}
 #endif /* ARM_MMU_EXTENDED */
-				l2pte_reset(cleanlist[cnt].ptep);
-				PTE_SYNC_CURRENT(pm, cleanlist[cnt].ptep);
 			}
 
 			/*
@@ -3683,10 +3701,12 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
 		}
 #endif
 		if (l2pte_valid_p(opte)) {
+			l2pte_reset(ptep);
+			PTE_SYNC(ptep);
 #ifdef PMAP_CACHE_VIVT
 			cpu_dcache_wbinv_range(va, PAGE_SIZE);
 #endif
-			cpu_tlb_flushD_SE(va);
+			pmap_tlb_flush_SE(kpm, va, PVF_REF);
 			cpu_cpwait();
 		}
 	}
@@ -3700,7 +3720,7 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
 	if (prot & VM_PROT_EXECUTE)
 		npte &= ~L2_XS_XN;
 #endif
-	l2pte_set(ptep, npte, opte);
+	l2pte_set(ptep, npte, 0);
 	PTE_SYNC(ptep);
 
 	if (pg) {
@@ -3791,7 +3811,8 @@ pmap_kremove(vaddr_t va, vsize_t len)
 		if (next_bucket > eva)
 			next_bucket = eva;
 
-		struct l2_bucket * const l2b = pmap_get_l2_bucket(pmap_kernel(), va);
+		pmap_t kpm = pmap_kernel();
+		struct l2_bucket * const l2b = pmap_get_l2_bucket(kpm, va);
 		KDASSERT(l2b != NULL);
 
 		pt_entry_t * const sptep = &l2b->l2b_kva[l2pte_index(va)];
@@ -3826,13 +3847,14 @@ pmap_kremove(vaddr_t va, vsize_t len)
 				}
 			}
 			if (l2pte_valid_p(opte)) {
+				l2pte_reset(ptep);
+				PTE_SYNC(ptep);
 #ifdef PMAP_CACHE_VIVT
 				cpu_dcache_wbinv_range(va, PAGE_SIZE);
 #endif
-				cpu_tlb_flushD_SE(va);
-			}
-			if (opte) {
-				l2pte_reset(ptep);
+				/* XXXNH I did this */
+				pmap_tlb_flush_SE(kpm, va, PVF_REF);
+
 				mappings += PAGE_SIZE / L2_S_SIZE;
 			}
 			va += PAGE_SIZE;
@@ -3841,7 +3863,7 @@ pmap_kremove(vaddr_t va, vsize_t len)
 		KDASSERTMSG(mappings <= l2b->l2b_occupancy, "%u %u",
 		    mappings, l2b->l2b_occupancy);
 		l2b->l2b_occupancy -= mappings;
-		PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep));
+		//PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep));
 #ifdef UVMHIST
 		total_mappings += mappings;
 #endif
@@ -3871,13 +3893,13 @@ pmap_extract(pmap_t pm, vaddr_t va, padd
 		 * These should only happen for pmap_kernel()
 		 */
 		KDASSERT(pm == pmap_kernel());
-		pmap_release_pmap_lock(pm);
 #if (ARM_MMU_V6 + ARM_MMU_V7) > 0
 		if (l1pte_supersection_p(pde)) {
 			pa = (pde & L1_SS_FRAME) | (va & L1_SS_OFFSET);
 		} else
 #endif
 		pa = (pde & L1_S_FRAME) | (va & L1_S_OFFSET);
+		pmap_release_pmap_lock(pm);
 	} else {
 		/*
 		 * Note that we can't rely on the validity of the L1
@@ -3940,9 +3962,11 @@ pmap_protect(pmap_t pm, vaddr_t sva, vad
 
 	pmap_acquire_pmap_lock(pm);
 
+#ifndef ARM_MMU_EXTENDED
 	const bool flush = eva - sva >= PAGE_SIZE * 4;
-	u_int clr_mask = PVF_WRITE | ((prot & VM_PROT_EXECUTE) ? 0 : PVF_EXEC);
 	u_int flags = 0;
+#endif
+	u_int clr_mask = PVF_WRITE | ((prot & VM_PROT_EXECUTE) ? 0 : PVF_EXEC);
 
 	while (sva < eva) {
 		next_bucket = L2_NEXT_BUCKET_VA(sva);
@@ -3961,7 +3985,9 @@ pmap_protect(pmap_t pm, vaddr_t sva, vad
 			const pt_entry_t opte = *ptep;
 			if (l2pte_valid_p(opte) && l2pte_writable_p(opte)) {
 				struct vm_page *pg;
+#ifndef ARM_MMU_EXTENDED
 				u_int f;
+#endif
 
 #ifdef PMAP_CACHE_VIVT
 				/*
@@ -3975,7 +4001,12 @@ pmap_protect(pmap_t pm, vaddr_t sva, vad
 				pg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
 				pt_entry_t npte = l2pte_set_readonly(opte);
-				l2pte_set(ptep, npte, opte);
+				l2pte_reset(ptep);
+				PTE_SYNC(ptep);
+#ifdef ARM_MMU_EXTENDED
+				pmap_tlb_flush_SE(pm, sva, PVF_REF);
+#endif
+				l2pte_set(ptep, npte, 0);
 				PTE_SYNC(ptep);
 
 				if (pg != NULL) {
@@ -3983,10 +4014,14 @@ pmap_protect(pmap_t pm, vaddr_t sva, vad
 					paddr_t pa = VM_PAGE_TO_PHYS(pg);
 
 					pmap_acquire_page_lock(md);
-					f = pmap_modify_pv(md, pa, pm, sva,
-					    clr_mask, 0);
+#ifndef ARM_MMU_EXTENDED
+					f =
+#endif
+					    pmap_modify_pv(md, pa, pm, sva,
+						clr_mask, 0);
 					pmap_vac_me_harder(md, pa, pm, sva);
 					pmap_release_page_lock(md);
+#ifndef ARM_MMU_EXTENDED
 				} else {
 					f = PVF_REF | PVF_EXEC;
 				}
@@ -3995,6 +4030,7 @@ pmap_protect(pmap_t pm, vaddr_t sva, vad
 					flags |= f;
 				} else {
 					pmap_tlb_flush_SE(pm, sva, f);
+#endif
 				}
 			}
@@ -4003,6 +4039,7 @@ pmap_protect(pmap_t pm, vaddr_t sva, vad
 		}
 	}
 
+#ifndef ARM_MMU_EXTENDED
 	if (flush) {
 		if (PV_BEEN_EXECD(flags)) {
 			pmap_tlb_flushID(pm);
@@ -4010,6 +4047,7 @@ pmap_protect(pmap_t pm, vaddr_t sva, vad
 			pmap_tlb_flushD(pm);
 		}
 	}
+#endif
 
 	pmap_release_pmap_lock(pm);
 }
@@ -4208,12 +4246,15 @@ pmap_prefetchabt_fixup(void *v)
 		KASSERT(pv != NULL);
 
 		if (PV_IS_EXEC_P(pv->pv_flags)) {
+			l2pte_reset(ptep);
+			PTE_SYNC(ptep);
+			pmap_tlb_flush_SE(pm, va, PVF_EXEC | PVF_REF);
 			if (!PV_IS_EXEC_P(md->pvh_attrs)) {
 				pmap_syncicache_page(md, pa);
 			}
 			rv = ABORT_FIXUP_RETURN;
-			l2pte_set(ptep, opte & ~L2_XS_XN, opte);
-			pmap_tlb_flush_SE(pm, va, PVF_EXEC | PVF_REF);
+			l2pte_set(ptep, opte & ~L2_XS_XN, 0);
+			PTE_SYNC(ptep);
 		}
 
 		pmap_release_page_lock(md);
@@ -4359,7 +4400,11 @@ pmap_fault_fixup(pmap_t pm, vaddr_t va,
 		    | (pm != pmap_kernel() ? L2_XS_nG : 0)
 #endif
 		    | 0;
-		l2pte_set(ptep, npte, opte);
+		l2pte_reset(ptep);
+		PTE_SYNC(ptep);
+		pmap_tlb_flush_SE(pm, va,
+		    (ftype & VM_PROT_EXECUTE) ? PVF_EXEC | PVF_REF : PVF_REF);
+		l2pte_set(ptep, npte, 0);
 		PTE_SYNC(ptep);
 		PMAPCOUNT(fixup_mod);
 		rv = 1;
@@ -4397,6 +4442,8 @@ pmap_fault_fixup(pmap_t pm, vaddr_t va,
 #ifdef ARM_MMU_EXTENDED
 		if (pm != pmap_kernel()) {
 			npte |= L2_XS_nG;
+		} else {
+			KASSERT(!(npte & L2_XS_nG));
 		}
 		/*
 		 * If we got called from prefetch abort, then ftype will have
@@ -4426,7 +4473,11 @@ pmap_fault_fixup(pmap_t pm, vaddr_t va,
 		}
 #endif /* ARM_MMU_EXTENDED */
 		pmap_release_page_lock(md);
-		l2pte_set(ptep, npte, opte);
+		l2pte_reset(ptep);
+		PTE_SYNC(ptep);
+		pmap_tlb_flush_SE(pm, va,
+		    (ftype & VM_PROT_EXECUTE) ? PVF_EXEC | PVF_REF : PVF_REF);
+		l2pte_set(ptep, npte, 0);
 		PTE_SYNC(ptep);
 		PMAPCOUNT(fixup_ref);
 		rv = 1;
@@ -4464,7 +4515,10 @@ pmap_fault_fixup(pmap_t pm, vaddr_t va,
 		 * Turn off no-execute.
 		 */
 		KASSERT(opte & L2_XS_nG);
-		l2pte_set(ptep, opte & ~L2_XS_XN, opte);
+		l2pte_reset(ptep);
+		PTE_SYNC(ptep);
+		pmap_tlb_flush_SE(pm, va, PVF_EXEC | PVF_REF);
+		l2pte_set(ptep, opte & ~L2_XS_XN, 0);
 		PTE_SYNC(ptep);
 		rv = 1;
 		PMAPCOUNT(fixup_exec);
@@ -4481,6 +4535,7 @@ pmap_fault_fixup(pmap_t pm, vaddr_t va,
 		pd_entry_t * const pdep = pmap_l1_kva(pm) + l1slot;
 		pd_entry_t pde = L1_C_PROTO | l2b->l2b_pa | L1_C_DOM(pmap_domain(pm));
 		if (*pdep != pde) {
+			/* XXXNH KASSERT *pdep == 0 ??? */
 			l1pte_setone(pdep, pde);
 			PTE_SYNC(pdep);
 			rv = 1;
@@ -4612,9 +4667,6 @@ pmap_fault_fixup(pmap_t pm, vaddr_t va,
 	}
 #endif
 
-	pmap_tlb_flush_SE(pm, va,
-	    (ftype & VM_PROT_EXECUTE) ? PVF_EXEC | PVF_REF : PVF_REF);
-
 	rv = 1;
 
 out:
@@ -4873,7 +4925,7 @@ pmap_deactivate(struct lwp *l)
 #ifdef ARM_MMU_EXTENDED
 	kpreempt_disable();
 	struct cpu_info * const ci = curcpu();
-	struct pmap_asid_info * const pai = PMAP_PAI(pm, cpu_tlb_info(ci));
+	//struct pmap_asid_info * const pai = PMAP_PAI(pm, cpu_tlb_info(ci));
 
 	/*
 	 * Disable translation table walks from TTBR0 while no pmap has been
 	 * activated.
@@ -4882,7 +4934,7 @@ pmap_deactivate(struct lwp *l)
 	armreg_ttbcr_write(old_ttbcr | TTBCR_S_PD0);
 	arm_isb();
 	pmap_tlb_asid_deactivate(pm);
-	cpu_setttb(pmap_kernel()->pm_l1_pa, pai->pai_asid);
+	cpu_setttb(pmap_kernel()->pm_l1_pa, KERNEL_PID);
 	ci->ci_pmap_cur = pmap_kernel();
 	kpreempt_enable();
 #else
@@ -4922,6 +4974,11 @@ pmap_update(pmap_t pm)
 	}
 
 #ifdef ARM_MMU_EXTENDED
+#if defined(MULTIPROCESSOR)
+	armreg_bpiallis_write(0);
+#else
+	armreg_bpiall_write(0);
+#endif
 
 #if defined(MULTIPROCESSOR) && PMAP_MAX_TLB > 1
 	u_int pending = atomic_swap_uint(&pmap->pm_shootdown_pending, 0);
@@ -5144,7 +5201,7 @@ pmap_zero_page_generic(paddr_t pa)
 	    | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE);
 	l2pte_set(ptep, npte, 0);
 	PTE_SYNC(ptep);
-	cpu_tlb_flushD_SE(vdstp);
+	cpu_tlb_flushD_SE(vdstp, KERNEL_PID);
 	cpu_cpwait();
 #if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) && defined(PMAP_CACHE_VIPT) \
     && !defined(ARM_MMU_EXTENDED)
@@ -5165,7 +5222,7 @@ pmap_zero_page_generic(paddr_t pa)
 	 */
 	l2pte_reset(ptep);
 	PTE_SYNC(ptep);
-	cpu_tlb_flushD_SE(vdstp);
+	cpu_tlb_flushD_SE(vdstp, KERNEL_PID);
 #ifdef PMAP_CACHE_VIVT
 	cpu_dcache_wbinv_range(vdstp, PAGE_SIZE);
 #endif
@@ -5272,7 +5329,7 @@ pmap_pageidlezero(paddr_t pa)
 	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
 	l2pte_set(ptep, npte, 0);
 	PTE_SYNC(ptep);
-	cpu_tlb_flushD_SE(vdstp);
+	cpu_tlb_flushD_SE(vdstp, KERNEL_PID);
 	cpu_cpwait();
 }
@@ -5320,7 +5377,7 @@ pmap_pageidlezero(paddr_t pa)
 	if (!okcolor) {
 		l2pte_reset(ptep);
 		PTE_SYNC(ptep);
-		cpu_tlb_flushD_SE(vdstp);
+		cpu_tlb_flushD_SE(vdstp, KERNEL_PID);
 	}
 
 	return rv;
@@ -5412,7 +5469,7 @@ pmap_copy_page_generic(paddr_t src, padd
 	    | L2_S_PROT(PTE_KERNEL, VM_PROT_READ);
 		l2pte_set(src_ptep, nsrc_pte, 0);
 		PTE_SYNC(src_ptep);
-		cpu_tlb_flushD_SE(vsrcp);
+		cpu_tlb_flushD_SE(vsrcp, KERNEL_PID);
 		cpu_cpwait();
 	}
 	if (!dst_okcolor) {
@@ -5420,7 +5477,7 @@ pmap_copy_page_generic(paddr_t src, padd
 		    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
 		l2pte_set(dst_ptep, ndst_pte, 0);
 		PTE_SYNC(dst_ptep);
-		cpu_tlb_flushD_SE(vdstp);
+		cpu_tlb_flushD_SE(vdstp, KERNEL_PID);
 		cpu_cpwait();
 #if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) && defined(PMAP_CACHE_VIPT)
 		/*
@@ -5444,13 +5501,13 @@ pmap_copy_page_generic(paddr_t src, padd
 	if (!src_okcolor) {
 		l2pte_reset(src_ptep);
 		PTE_SYNC(src_ptep);
-		cpu_tlb_flushD_SE(vsrcp);
+		cpu_tlb_flushD_SE(vsrcp, KERNEL_PID);
 		cpu_cpwait();
 	}
 	if (!dst_okcolor) {
 		l2pte_reset(dst_ptep);
 		PTE_SYNC(dst_ptep);
-		cpu_tlb_flushD_SE(vdstp);
+		cpu_tlb_flushD_SE(vdstp, KERNEL_PID);
 		cpu_cpwait();
 	}
 #ifdef PMAP_CACHE_VIPT
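Aside (not part of the patch): the pmap_update() hunk above adds a branch-predictor invalidate. On ARMv7 the predictor can keep targets that were fetched through a mapping the TLB flush has already destroyed, so the BP array is scrubbed as well -- inner-shareable (BPIALLIS) on MULTIPROCESSOR kernels, local (BPIALL) otherwise. A sketch of the sequence; the trailing barriers are an assumption, they are not shown in the hunk:

static inline void
example_bp_flush(void)
{
#if defined(MULTIPROCESSOR)
	armreg_bpiallis_write(0);	/* BPIALLIS: all CPUs in the shareability domain */
#else
	armreg_bpiall_write(0);		/* BPIALL: this CPU only */
#endif
	arm_dsb();
	arm_isb();
}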
@@ -5700,6 +5757,7 @@ pmap_growkernel(vaddr_t maxkvaddr)
 	if (maxkvaddr <= pmap_curmaxkvaddr)
 		goto out;		/* we are OK */
 
+printf("pmap_growkernel: growing kernel from 0x%lx to 0x%lx\n",pmap_curmaxkvaddr, maxkvaddr);
 	NPDEBUG(PDB_GROWKERN,
 	    printf("pmap_growkernel: growing kernel from 0x%lx to 0x%lx\n",
 	    pmap_curmaxkvaddr, maxkvaddr));
@@ -5733,6 +5791,8 @@ pmap_growkernel(vaddr_t maxkvaddr)
 		    | L1_C_DOM(PMAP_DOMAIN_KERNEL);
 #ifdef ARM_MMU_EXTENDED
 		l1pte_setone(pdep, npde);
+		/* XXXNH ??? */
+		PDE_SYNC(pdep);
 #else
 		/* Distribute new L1 entry to all other L1s */
 		SLIST_FOREACH(l1, &l1_list, l1_link) {
@@ -6618,6 +6678,9 @@ pmap_map_chunk(vaddr_t l1pt, vaddr_t va,
 #ifdef VERBOSE_INIT_ARM
 			printf("sS");
 #endif
+#ifdef ARM_MMU_EXTENDED
+			KASSERT(!(npde & L1_S_V6_nG));
+#endif
 			l1pte_set(&pdep[l1slot], npde);
 			PDE_SYNC_RANGE(&pdep[l1slot], L1_SS_SIZE / L1_S_SIZE);
 			va += L1_SS_SIZE;
@@ -6640,6 +6703,9 @@ pmap_map_chunk(vaddr_t l1pt, vaddr_t va,
 #ifdef VERBOSE_INIT_ARM
 			printf("S");
 #endif
+#ifdef ARM_MMU_EXTENDED
+			KASSERT(!(npde & L1_S_V6_nG));
+#endif
 			l1pte_set(&pdep[l1slot], npde);
 			PDE_SYNC(&pdep[l1slot]);
 			va += L1_S_SIZE;
@@ -6676,6 +6742,9 @@ pmap_map_chunk(vaddr_t l1pt, vaddr_t va,
 #ifdef VERBOSE_INIT_ARM
 			printf("L");
 #endif
+#ifdef ARM_MMU_EXTENDED
+			KASSERT(!(npte & L2_XS_nG));
+#endif
 			l2pte_set(ptep, npte, 0);
 			PTE_SYNC_RANGE(ptep, L2_L_SIZE / L2_S_SIZE);
 			va += L2_L_SIZE;
@@ -6696,6 +6765,9 @@ pmap_map_chunk(vaddr_t l1pt, vaddr_t va,
 		    | (va & 0x80000000 ? 0 : L2_XS_nG)
 #endif
 		    | L2_S_PROT(PTE_KERNEL, prot) | f2s;
+#ifdef ARM_MMU_EXTENDED
+		KASSERT(!(npte & L2_XS_nG));
+#endif
 		l2pte_set(ptep, npte, 0);
 		PTE_SYNC(ptep);
 		va += PAGE_SIZE;
Index: sys/arch/arm/include/cpufunc.h
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/include/cpufunc.h,v
retrieving revision 1.74
diff -u -p -r1.74 cpufunc.h
--- sys/arch/arm/include/cpufunc.h	20 Apr 2014 16:06:05 -0000	1.74
+++ sys/arch/arm/include/cpufunc.h	24 Oct 2014 21:43:12 -0000
@@ -72,11 +72,23 @@ struct cpu_functions {
 
 	/* TLB functions */
 
 	void	(*cf_tlb_flushID)	(void);
+#if defined(ARM_MMU_EXTENDED)
+	void	(*cf_tlb_flushID_SE)	(vaddr_t, tlb_asid_t);
+#else
 	void	(*cf_tlb_flushID_SE)	(vaddr_t);
+#endif
 	void	(*cf_tlb_flushI)	(void);
+#if defined(ARM_MMU_EXTENDED)
+	void	(*cf_tlb_flushI_SE)	(vaddr_t, tlb_asid_t);
+#else
 	void	(*cf_tlb_flushI_SE)	(vaddr_t);
+#endif
 	void	(*cf_tlb_flushD)	(void);
+#if defined(ARM_MMU_EXTENDED)
+	void	(*cf_tlb_flushD_SE)	(vaddr_t, tlb_asid_t);
+#else
 	void	(*cf_tlb_flushD_SE)	(vaddr_t);
+#endif
 
 	/*
 	 * Cache operations:
@@ -178,11 +190,18 @@ extern u_int cputype;
 #define	cpu_faultaddress()	cpufuncs.cf_faultaddress()
 
 #define	cpu_tlb_flushID()	cpufuncs.cf_tlb_flushID()
-#define	cpu_tlb_flushID_SE(e)	cpufuncs.cf_tlb_flushID_SE(e)
 #define	cpu_tlb_flushI()	cpufuncs.cf_tlb_flushI()
-#define	cpu_tlb_flushI_SE(e)	cpufuncs.cf_tlb_flushI_SE(e)
 #define	cpu_tlb_flushD()	cpufuncs.cf_tlb_flushD()
+
+#ifdef ARM_MMU_EXTENDED
+#define	cpu_tlb_flushD_SE(e,a)	cpufuncs.cf_tlb_flushD_SE((e),(a))
+#define	cpu_tlb_flushI_SE(e,a)	cpufuncs.cf_tlb_flushI_SE((e),(a))
+#define	cpu_tlb_flushID_SE(e,a)	cpufuncs.cf_tlb_flushID_SE((e),(a))
+#else
 #define	cpu_tlb_flushD_SE(e)	cpufuncs.cf_tlb_flushD_SE(e)
+#define	cpu_tlb_flushI_SE(e)	cpufuncs.cf_tlb_flushI_SE(e)
+#define	cpu_tlb_flushID_SE(e)	cpufuncs.cf_tlb_flushID_SE(e)
+#endif
 
 #define	cpu_icache_sync_all()	cpufuncs.cf_icache_sync_all()
 #define	cpu_icache_sync_range(a, s) cpufuncs.cf_icache_sync_range((a), (s))
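Aside (not part of the patch): with cpufunc.h now exposing two different cpu_tlb_flush*_SE() signatures, MD code that has to compile both with and without ARM_MMU_EXTENDED can keep the conditional in one place. A hypothetical wrapper -- not something the patch adds -- for kernel virtual addresses, which always carry the kernel ASID:

static inline void
example_kernel_tlb_flushD(vaddr_t va)
{
#ifdef ARM_MMU_EXTENDED
	cpu_tlb_flushD_SE(va, KERNEL_PID);	/* kernel VAs are tagged KERNEL_PID */
#else
	cpu_tlb_flushD_SE(va);
#endif
}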
Index: sys/arch/arm/include/cpufunc_proto.h
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/include/cpufunc_proto.h,v
retrieving revision 1.3
diff -u -p -r1.3 cpufunc_proto.h
--- sys/arch/arm/include/cpufunc_proto.h	13 Sep 2014 17:41:03 -0000	1.3
+++ sys/arch/arm/include/cpufunc_proto.h	24 Oct 2014 21:43:12 -0000
@@ -265,9 +265,15 @@ void arm11_setup (char *string);
 void	arm11_tlb_flushID	(void);
 void	arm11_tlb_flushI	(void);
 void	arm11_tlb_flushD	(void);
+#if defined(ARM_MMU_EXTENDED)
+void	arm11_tlb_flushID_SE	(vaddr_t, tlb_asid_t);
+void	arm11_tlb_flushI_SE	(vaddr_t, tlb_asid_t);
+void	arm11_tlb_flushD_SE	(vaddr_t, tlb_asid_t);
+#else
 void	arm11_tlb_flushID_SE	(vaddr_t);
 void	arm11_tlb_flushI_SE	(vaddr_t);
 void	arm11_tlb_flushD_SE	(vaddr_t);
+#endif
 
 void	armv11_dcache_wbinv_all	(void);
 void	armv11_idcache_wbinv_all(void);
@@ -293,9 +299,17 @@ void armv6_idcache_wbinv_range (vaddr_t,
 #if defined(ARM_MMU_EXTENDED)
 void	armv7_setttb(u_int, tlb_asid_t);
 void	armv7_context_switch(u_int, tlb_asid_t);
+
+void	armv7_tlb_flushID_SE(vaddr_t, tlb_asid_t);
+void	armv7_tlb_flushI_SE(vaddr_t, tlb_asid_t);
+void	armv7_tlb_flushD_SE(vaddr_t, tlb_asid_t);
 #else
 void	armv7_setttb(u_int, bool);
 void	armv7_context_switch(u_int);
+
+void	armv7_tlb_flushID_SE(vaddr_t);
+void	armv7_tlb_flushI_SE(vaddr_t);
+void	armv7_tlb_flushD_SE(vaddr_t);
 #endif
 
 void	armv7_icache_sync_range(vaddr_t, vsize_t);
@@ -313,10 +327,6 @@ void armv7_tlb_flushID(void);
 void	armv7_tlb_flushI(void);
 void	armv7_tlb_flushD(void);
 
-void	armv7_tlb_flushID_SE(vaddr_t);
-void	armv7_tlb_flushI_SE(vaddr_t);
-void	armv7_tlb_flushD_SE(vaddr_t);
-
 void	armv7_cpu_sleep(int);
 void	armv7_drain_writebuf(void);
 void	armv7_setup(char *string);
Index: sys/uvm/pmap/pmap_tlb.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/pmap/pmap_tlb.c,v
retrieving revision 1.9
diff -u -p -r1.9 pmap_tlb.c
--- sys/uvm/pmap/pmap_tlb.c	18 Oct 2014 09:54:19 -0000	1.9
+++ sys/uvm/pmap/pmap_tlb.c	24 Oct 2014 21:43:22 -0000
@@ -935,7 +935,7 @@ pmap_tlb_asid_deactivate(pmap_t pm)
 #endif
 	curcpu()->ci_pmap_asid_cur = 0;
 	UVMHIST_LOG(maphist, " <-- done (pm=%#x)", pm, 0, 0, 0);
-	tlb_set_asid(0);
+	tlb_set_asid(KERNEL_PID);
 #if defined(DEBUG)
 	pmap_tlb_asid_check();
 #endif
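Aside (not part of the patch): pulling the pieces together, the patched tlb_set_asid() from the arm32_tlb.c hunk ends up with the shape below. The added arm_isb() guarantees that the TTBCR.PD0 write -- which forbids translation table walks through TTBR0 while only the kernel pmap is active -- is in force before CONTEXTIDR switches to KERNEL_PID, so no speculative user-side walk can be tagged with the incoming ASID. Reconstructed from the hunk for illustration:

static inline void
example_tlb_set_asid(tlb_asid_t asid)
{
	arm_dsb();
	if (asid == KERNEL_PID) {
		/* No user pmap active: disable TTBR0 translation walks. */
		armreg_ttbcr_write(armreg_ttbcr_read() | TTBCR_S_PD0);
		arm_isb();	/* PD0 takes effect before the ASID changes */
	}
	armreg_contextidr_write(asid);
	arm_isb();
}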