Index: sys/arch/mips/mips/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/mips/mips/pmap.c,v
retrieving revision 1.220
diff -u -p -r1.220 pmap.c
--- sys/arch/mips/mips/pmap.c	5 Nov 2015 06:26:15 -0000	1.220
+++ sys/arch/mips/mips/pmap.c	25 Jun 2016 10:21:06 -0000
@@ -302,6 +301,7 @@ u_int pmap_page_colormask;
 	    (pm) == curlwp->l_proc->p_vmspace->vm_map.pmap)
 
 /* Forward function declarations */
+void pmap_page_remove(struct vm_page *);
 void pmap_remove_pv(pmap_t, vaddr_t, struct vm_page *, bool);
 void pmap_enter_pv(pmap_t, vaddr_t, struct vm_page *, u_int *, int);
 pt_entry_t *pmap_pte(pmap_t, vaddr_t);
@@ -1054,6 +1054,10 @@ pmap_page_protect(struct vm_page *pg, vm
 	while (pv != NULL) {
 		const pmap_t pmap = pv->pv_pmap;
 		const uint16_t gen = PG_MD_PVLIST_GEN(md);
+		if (pv->pv_va & PV_KENTER) {
+			pv = pv->pv_next;
+			continue;
+		}
 		va = trunc_page(pv->pv_va);
 		PG_MD_PVLIST_UNLOCK(md);
 		pmap_protect(pmap, va, va + PAGE_SIZE, prot);
@@ -1078,17 +1082,7 @@ pmap_page_protect(struct vm_page *pg, vm
 		if (pmap_clear_mdpage_attributes(md, PG_MD_EXECPAGE)) {
 			PMAP_COUNT(exec_uncached_page_protect);
 		}
-		(void)PG_MD_PVLIST_LOCK(md, false);
-		pv = &md->pvh_first;
-		while (pv->pv_pmap != NULL) {
-			const pmap_t pmap = pv->pv_pmap;
-			va = trunc_page(pv->pv_va);
-			PG_MD_PVLIST_UNLOCK(md);
-			pmap_remove(pmap, va, va + PAGE_SIZE);
-			pmap_update(pmap);
-			(void)PG_MD_PVLIST_LOCK(md, false);
-		}
-		PG_MD_PVLIST_UNLOCK(md);
+		pmap_page_remove(pg);
 	}
 }
 
@@ -1237,11 +1231,7 @@ pmap_procwr(struct proc *p, vaddr_t va,
 	unsigned entry;
 
 	kpreempt_disable();
-	if (pmap == pmap_kernel()) {
-		pte = kvtopte(va);
-	} else {
-		pte = pmap_pte_lookup(pmap, va);
-	}
+	pte = pmap_pte(pmap, va);
 	entry = pte->pt_entry;
 	kpreempt_enable();
 	if (!mips_pg_v(entry))
@@ -2060,6 +2050,32 @@ pmap_set_modified(paddr_t pa)
 /******************** pv_entry management ********************/
 
 static void
+pmap_check_alias(struct vm_page *pg)
+{
+#ifdef MIPS3_PLUS	/* XXX mmu XXX */
+#ifndef MIPS3_NO_PV_UNCACHED
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+
+	if (MIPS_HAS_R4K_MMU && PG_MD_UNCACHED_P(md)) {
+		/*
+		 * Page is currently uncached, check if alias mapping has been
+		 * removed.  If it was, then reenable caching.
+		 */
+		pv_entry_t pv = &md->pvh_first;
+		pv_entry_t pv0 = pv->pv_next;
+
+		for (; pv0; pv0 = pv0->pv_next) {
+			if (mips_cache_badalias(pv->pv_va, pv0->pv_va))
+				break;
+		}
+		if (pv0 == NULL)
+			pmap_page_cache(pg, true);
+	}
+#endif
+#endif	/* MIPS3_PLUS */
+}
+
+static void
 pmap_check_pvlist(struct vm_page_md *md)
 {
 #ifdef PARANOIADIAG
@@ -2276,6 +2292,110 @@ again:
 }
 
 /*
+ * Remove this page from all physical maps in which it resides.
+ * Reflects back modify bits to the pager.
+ */
+void
+pmap_page_remove(struct vm_page *pg)
+{
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+
+	(void)PG_MD_PVLIST_LOCK(md, true);
+	pmap_check_pvlist(md);
+
+	pv_entry_t pv = &md->pvh_first;
+	if (pv->pv_pmap == NULL) {
+		PG_MD_PVLIST_UNLOCK(md);
+		return;
+	}
+
+	pv_entry_t npv;
+	pv_entry_t pvp = NULL;
+
+	/* earlier? */
+	kpreempt_disable();
+	for (; pv != NULL; pv = npv) {
+		npv = pv->pv_next;
+		if (pv->pv_va & PV_KENTER) {
+#ifdef DEBUG
+			if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) {
+				printf("%s: %p %p, %"PRIxVADDR" skip\n",
+				    __func__, pv, pv->pv_pmap, pv->pv_va);
+			}
+#endif
+			/*
+			 * pvp is non-null when we already have a PV_KENTER
+			 * pv in pvh_first; otherwise we haven't seen a
+			 * PV_KENTER pv and we need to copy this one to
+			 * pvh_first
+			 */
+			if (pvp) {
+				/*
+				 * The previous PV_KENTER pv needs to point to
+				 * this PV_KENTER pv
+				 */
+				pvp->pv_next = pv;
+			} else {
+				pv_entry_t fpv = &md->pvh_first;
+				*fpv = *pv;
+			}
+			/* Assume no more - it'll get fixed if there are more */
+			pv->pv_next = NULL;
+			pvp = pv;
+			continue;
+		}
+
+
+		const pmap_t pmap = pv->pv_pmap;
+		vaddr_t va = trunc_page(pv->pv_va);
+		pt_entry_t *pte = pmap_pte(pmap, va);
+
+#ifdef DEBUG
+		if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) {
+			printf("%s: %p %p, %"PRIxVADDR", %p\n",
+			    __func__, pv, pmap, va, pte);
+		}
+#endif
+
+		KASSERT(pte);
+		if (mips_pg_wired(pte->pt_entry))
+			pmap->pm_stats.wired_count--;
+		pmap->pm_stats.resident_count--;
+
+		if (pmap == pmap_kernel()) {
+			if (MIPS_HAS_R4K_MMU)
+				/* See above about G bit */
+				pte->pt_entry = MIPS3_PG_NV | MIPS3_PG_G;
+			else
+				pte->pt_entry = MIPS1_PG_NV;
+		} else {
+			pte->pt_entry = mips_pg_nv_bit();
+		}
+		/*
+		 * Flush the TLB for the given address.
+		 */
+		pmap_tlb_invalidate_addr(pmap, va);
+
+		/*
+		 * non-null means this is a non-pvh_first pv, so we should
+		 * free it.
+		 */
+		if (pvp) {
+			pmap_pv_free(pv);
+		} else {
+			pv->pv_pmap = NULL;
+			pv->pv_next = NULL;
+		}
+	}
+	pmap_check_alias(pg);
+
+	pmap_check_pvlist(md);
+
+	kpreempt_enable();
+	PG_MD_PVLIST_UNLOCK(md);
+}
+
+/*
  * Remove a physical to virtual address translation.
  * If cache was inhibited on this page, and there are no more cache
  * conflicts, restore caching.
@@ -2330,25 +2450,7 @@ pmap_remove_pv(pmap_t pmap, vaddr_t va,
 			pv->pv_next = npv->pv_next;
 		}
 	}
-#ifdef MIPS3_PLUS	/* XXX mmu XXX */
-#ifndef MIPS3_NO_PV_UNCACHED
-	if (MIPS_HAS_R4K_MMU && PG_MD_UNCACHED_P(md)) {
-		/*
-		 * Page is currently uncached, check if alias mapping has been
-		 * removed.  If it was, then reenable caching.
-		 */
-		pv = &md->pvh_first;
-		pv_entry_t pv0 = pv->pv_next;
-
-		for (; pv0; pv0 = pv0->pv_next) {
-			if (mips_cache_badalias(pv->pv_va, pv0->pv_va))
-				break;
-		}
-		if (pv0 == NULL)
-			pmap_page_cache(pg, true);
-	}
-#endif
-#endif	/* MIPS3_PLUS */
+	pmap_check_alias(pg);
 
 	pmap_check_pvlist(md);
 	PG_MD_PVLIST_UNLOCK(md);
Index: sys/arch/mips/mips/pmap_tlb.c
===================================================================
RCS file: /cvsroot/src/sys/arch/mips/mips/pmap_tlb.c,v
retrieving revision 1.10
diff -u -p -r1.10 pmap_tlb.c
--- sys/arch/mips/mips/pmap_tlb.c	11 Jun 2015 15:15:27 -0000	1.10
+++ sys/arch/mips/mips/pmap_tlb.c	25 Jun 2016 10:21:08 -0000
@@ -633,6 +633,7 @@ pmap_tlb_update_addr(pmap_t pm, vaddr_t
 
 	TLBINFO_LOCK(ti);
 	if (pm == pmap_kernel() || PMAP_PAI_ASIDVALID_P(pai, ti)) {
+		KASSERT((pm == pmap_kernel()) == (pai->pai_asid == 0));
 		va |= pai->pai_asid << MIPS_TLB_PID_SHIFT;
 		pmap_tlb_asid_check();
 		rv = tlb_update(va, pt_entry);
@@ -656,6 +657,7 @@ pmap_tlb_invalidate_addr(pmap_t pm, vadd
 
 	TLBINFO_LOCK(ti);
 	if (pm == pmap_kernel() || PMAP_PAI_ASIDVALID_P(pai, ti)) {
+		KASSERT((pm == pmap_kernel()) == (pai->pai_asid == 0));
 		va |= pai->pai_asid << MIPS_TLB_PID_SHIFT;
 		pmap_tlb_asid_check();
 		tlb_invalidate_addr(va);