--- sys.copy/arch/mips/include/pte.h 2016-07-07 15:07:54.000000000 +0100
+++ sys/arch/mips/include/pte.h 2016-07-06 12:24:40.000000000 +0100
@@ -396,7 +396,7 @@
         pa &= ~(PGC_NOCACHE|PGC_PREFETCH);
 #endif
 
-#if defined(cobalt) || defined(newsmips) || defined(pmax) /* otherwise ok */
+#if defined(Xcobalt) || defined(newsmips) || defined(pmax) /* otherwise ok */
         /* this is not error in general. */
         KASSERTMSG((pa & 0x8000000) == 0, "%#"PRIxPADDR, pa);
 #endif
--- sys.copy/arch/mips/mips/pmap_machdep.c 2016-07-07 15:07:54.000000000 +0100
+++ sys/arch/mips/mips/pmap_machdep.c 2016-07-08 09:25:27.000000000 +0100
@@ -894,15 +896,21 @@
          * When this address is touched again, the uvm will
          * fault it in.  Because of this, each page will only
          * be mapped with one index at any given time.
-         */
-        if (mips_cache_badalias(pv->pv_va, va)) {
-                vaddr_t nva = trunc_page(pv->pv_va);
-                pmap_t pmap = pv->pv_pmap;
-                VM_PAGEMD_PVLIST_UNLOCK(mdpg);
-                pmap_remove(pmap, nva, nva + PAGE_SIZE);
-                pmap_update(pmap);
-                (void)VM_PAGEMD_PVLIST_LOCK(mdpg);
-                return true;
+         *
+         * We need to deal with all entries on the list - if the first is
+         * incompatible with the new mapping then they all will be.
+         */
+        if (mips_cache_badalias(pv->pv_va, va)) {
+                for (pv_entry_t npv = pv; npv; npv = npv->pv_next) {
+                        vaddr_t nva = trunc_page(npv->pv_va);
+                        /* XXXNH PV_KENTER skip */
+                        pmap_t npm = npv->pv_pmap;
+                        VM_PAGEMD_PVLIST_UNLOCK(mdpg);
+                        pmap_remove(npm, nva, nva + PAGE_SIZE);
+                        pmap_update(npm);
+                        (void)VM_PAGEMD_PVLIST_LOCK(mdpg);
+                }
+                return true;
         }
         return false;
 #else   /* !PMAP_NO_PV_UNCACHED */
@@ -970,8 +978,8 @@
          * Page is currently uncached, check if alias mapping has been
          * removed.  If it was, then reenable caching.
          */
-        pv_entry_t pv = &mdpg->mdpg_first;
         (void)VM_PAGEMD_PVLIST_READLOCK(mdpg);
+        pv_entry_t pv = &mdpg->mdpg_first;
         pv_entry_t pv0 = pv->pv_next;
 
         for (; pv0; pv0 = pv0->pv_next) {
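
[Reviewer note: the pmap_machdep.c hunks above lean on the usual VIPT
alias test - two mappings of the same physical page conflict when they
differ in the cache-index bits above the page offset.  A minimal
userland sketch of that predicate follows; `cache_alias_mask' and
`cache_badalias' are illustrative stand-ins, not the kernel's own
names, and the mask value is an assumed example rather than anything
the patch computes.]

#include <stdbool.h>
#include <stdint.h>

typedef uintptr_t vaddr_t;

/*
 * Stand-in for the mask the kernel derives from the primary-cache
 * geometry: the bits above the page offset that select a cache index.
 * Example: 32KB direct-mapped cache with 4KB pages -> bits 12-14.
 */
static const vaddr_t cache_alias_mask = 0x7000;

/*
 * Two virtual mappings of the same physical page alias badly when
 * they select different cache indexes.
 */
static bool
cache_badalias(vaddr_t va1, vaddr_t va2)
{
        return ((va1 ^ va2) & cache_alias_mask) != 0;
}
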
--- sys.copy/arch/mips/mips/trap.c 2016-07-07 15:07:54.000000000 +0100
+++ sys/arch/mips/mips/trap.c 2016-07-08 09:26:12.000000000 +0100
@@ -298,11 +298,12 @@
                 // we try to update it, we better have done it.
                 KASSERTMSG(pte_valid_p(pte), "%#"PRIx32, pte_value(pte));
                 vaddr = trunc_page(vaddr);
-                int __diagused ok = pmap_tlb_update_addr(pmap, vaddr, pte, 0);
+                int ok = pmap_tlb_update_addr(pmap, vaddr, pte, 0);
                 kpreempt_enable();
-                KASSERTMSG(ok == 1, "pmap_tlb_update_addr(%p,%#"PRIxVADDR
-                    ",%#"PRIxPTE", 0) returned %d",
-                    pmap, vaddr, pte_value(pte), ok);
+                if (ok != 1)
+                        printf("pmap_tlb_update_addr(%p,%#"
+                            PRIxVADDR",%#"PRIxPTE", 0) returned %d\n",
+                            pmap, vaddr, pte_value(pte), ok);
                 paddr_t pa = pte_to_paddr(pte);
                 KASSERTMSG(uvm_pageismanaged(pa),
                     "%#"PRIxVADDR" pa %#"PRIxPADDR, vaddr, pa);
@@ -371,14 +372,14 @@
 
 #ifdef PMAP_FAULTINFO
         if (p->p_pid == pfi->pfi_lastpid && va == pfi->pfi_faultaddr) {
-                if (++pfi->pfi_repeats > 4) {
+                if (++pfi->pfi_repeats > 500) {
                         tlb_asid_t asid = tlb_get_asid();
                         pt_entry_t *ptep = pfi->pfi_faultpte;
                         printf("trap: fault #%u (%s/%s) for %#"PRIxVADDR" (%#"PRIxVADDR") at pc %#"PRIxVADDR" curpid=%u/%u ptep@%p=%#"PRIxPTE")\n", pfi->pfi_repeats, trap_names[TRAPTYPE(cause)], trap_names[pfi->pfi_faulttype], va, vaddr, pc, map->pmap->pm_pai[0].pai_asid, asid, ptep, ptep ? pte_value(*ptep) : 0);
                         if (pfi->pfi_repeats >= 4) {
                                 cpu_Debugger();
                         } else {
-                        pfi->pfi_faulttype = TRAPTYPE(cause);
+                                pfi->pfi_faulttype = TRAPTYPE(cause);
                         }
                 }
         } else {
@@ -399,12 +400,12 @@
 
                 }
                 pcb->pcb_onfault = onfault;
-#if defined(VMFAULT_TRACE) && 1
+#if defined(VMFAULT_TRACE)
                 if (!KERNLAND_P(va))
-                printf(
-                    "uvm_fault(%p (pmap %p), %#"PRIxVADDR
-                    " (%"PRIxVADDR"), %d) -> %d at pc %#"PRIxVADDR"\n",
-                    map, vm->vm_map.pmap, va, vaddr, ftype, rv, pc);
+                        printf(
+                            "uvm_fault(%p (pmap %p), %#"PRIxVADDR
+                            " (%"PRIxVADDR"), %d) -> %d at pc %#"PRIxVADDR"\n",
+                            map, vm->vm_map.pmap, va, vaddr, ftype, rv, pc);
 #endif
         /*
          * If this was a stack access we keep track of the maximum
--- sys.copy/arch/powerpc/include/booke/pmap.h 2016-07-07 15:07:54.000000000 +0100
+++ sys/arch/powerpc/include/booke/pmap.h 2016-07-06 16:29:58.000000000 +0100
@@ -120,7 +120,7 @@
         return (paddr_t) -1;
 }
 
-#ifdef __PMAP_PRIVATEx
+#ifdef __PMAP_PRIVATE
 /*
  * Virtual Cache Alias helper routines.  Not a problem for Booke CPUs.
  */
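
[Reviewer note: in the PMAP_FAULTINFO hunk the outer threshold moves
from 4 to 500, but the inner `pfi->pfi_repeats >= 4' test is always
true once the outer test has fired, so the `else' arm that refreshes
pfi_faulttype is unreachable.  A condensed userland sketch of the
repeat-fault throttle as it now behaves; `struct faultinfo' and
`fault_is_stuck' are hypothetical names standing in for the pcb's pfi
bookkeeping, not kernel interfaces:]

#include <stdint.h>

/* Condensed stand-in for the PMAP_FAULTINFO bookkeeping. */
struct faultinfo {
        uintptr_t       last_addr;      /* pfi_faultaddr */
        unsigned int    repeats;        /* pfi_repeats */
};

/*
 * Returns nonzero when the same VA has faulted so many times in a
 * row that retrying is pointless and the debugger should be entered.
 */
static int
fault_is_stuck(struct faultinfo *fi, uintptr_t va)
{
        if (va != fi->last_addr) {
                fi->last_addr = va;
                fi->repeats = 0;
                return 0;
        }
        return ++fi->repeats > 500;
}
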
--- sys.copy/uvm/pmap/pmap.c 2016-07-07 15:07:54.000000000 +0100
+++ sys/uvm/pmap/pmap.c 2016-07-08 09:27:42.000000000 +0100
@@ -246,6 +250,7 @@
         (pm) == curlwp->l_proc->p_vmspace->vm_map.pmap)
 
 /* Forward function declarations */
+void pmap_page_remove(struct vm_page *);
 static void pmap_pvlist_check(struct vm_page_md *);
 void pmap_remove_pv(pmap_t, vaddr_t, struct vm_page *, bool);
 void pmap_enter_pv(pmap_t, vaddr_t, struct vm_page *, pt_entry_t *, u_int);
@@ -699,6 +704,119 @@
 }
 
 /*
+ * Remove this page from all physical maps in which it resides.
+ * Reflects back modify bits to the pager.
+ */
+void
+pmap_page_remove(struct vm_page *pg)
+{
+        struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
+
+        kpreempt_disable();
+        VM_PAGEMD_PVLIST_LOCK(mdpg);
+        pmap_pvlist_check(mdpg);
+
+        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
+
+        pv_entry_t pv = &mdpg->mdpg_first;
+        if (pv->pv_pmap == NULL) {
+                VM_PAGEMD_PVLIST_UNLOCK(mdpg);
+                kpreempt_enable();
+                UVMHIST_LOG(pmaphist, " <-- done (empty)", 0, 0, 0, 0);
+                return;
+        }
+
+        pv_entry_t npv;
+        pv_entry_t pvp = NULL;
+
+        for (; pv != NULL; pv = npv) {
+                npv = pv->pv_next;
+#ifdef PMAP_VIRTUAL_CACHE_ALIASES
+                if (pv->pv_va & PV_KENTER) {
+                        UVMHIST_LOG(pmaphist, " pv %p pmap %p va %"
+                            PRIxVADDR" skip", pv, pv->pv_pmap, pv->pv_va, 0);
+
+                        KASSERT(pv->pv_pmap == pmap_kernel());
+
+                        /* Assume no more - it'll get fixed if there are */
+                        pv->pv_next = NULL;
+
+                        /*
+                         * pvp is non-null when we already have a PV_KENTER
+                         * pv in pvh_first; otherwise we haven't seen a
+                         * PV_KENTER pv and we need to copy this one to
+                         * pvh_first
+                         */
+                        if (pvp) {
+                                /*
+                                 * The previous PV_KENTER pv needs to point to
+                                 * this PV_KENTER pv
+                                 */
+                                pvp->pv_next = pv;
+                        } else {
+                                pv_entry_t fpv = &mdpg->mdpg_first;
+                                *fpv = *pv;
+                                KASSERT(fpv->pv_pmap == pmap_kernel());
+                        }
+                        pvp = pv;
+                        continue;
+                }
+#endif
+                const pmap_t pmap = pv->pv_pmap;
+                vaddr_t va = trunc_page(pv->pv_va);
+                pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
+                KASSERTMSG(ptep != NULL, "%#"PRIxVADDR " %#"PRIxVADDR, va,
+                    pmap_limits.virtual_end);
+                pt_entry_t pte = *ptep;
+                UVMHIST_LOG(pmaphist, " pv %p pmap %p va %"PRIxVADDR
+                    " pte %#"PRIxPTE, pv, pmap, va, pte_value(pte));
+                if (!pte_valid_p(pte))
+                        continue;
+                const bool is_kernel_pmap_p = (pmap == pmap_kernel());
+                if (is_kernel_pmap_p) {
+                        PMAP_COUNT(remove_kernel_pages);
+                } else {
+                        PMAP_COUNT(remove_user_pages);
+                }
+                if (pte_wired_p(pte))
+                        pmap->pm_stats.wired_count--;
+                pmap->pm_stats.resident_count--;
+
+                pmap_md_tlb_miss_lock_enter();
+                const pt_entry_t npte = pte_nv_entry(is_kernel_pmap_p);
+                *ptep = npte;
+                /*
+                 * Flush the TLB for the given address.
+                 */
+                pmap_tlb_invalidate_addr(pmap, va);
+                pmap_md_tlb_miss_lock_exit();
+
+                /*
+                 * non-null means this is a non-pvh_first pv, so we should
+                 * free it.
+                 */
+                if (pvp) {
+                        KASSERT(pvp->pv_pmap == pmap_kernel());
+                        KASSERT(pvp->pv_next == NULL);
+                        pmap_pv_free(pv);
+                } else {
+                        pv->pv_pmap = NULL;
+                        pv->pv_next = NULL;
+                }
+        }
+
+#ifdef PMAP_VIRTUAL_CACHE_ALIASES
+        /* Tell MD layer the bad aliases can't exist anymore ??? */
+#endif
+        pmap_pvlist_check(mdpg);
+        VM_PAGEMD_PVLIST_UNLOCK(mdpg);
+        kpreempt_enable();
+
+        UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
+}
+
+
+/*
  * Make a previously active pmap (vmspace) inactive.
  */
 void
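
[Reviewer note: the subtle part of pmap_page_remove() above is that
mdpg_first is embedded in the page metadata and can never be freed, so
surviving PV_KENTER entries must be compacted to the front of the list
with the first survivor copied into the embedded slot - that is what
the pvp bookkeeping is doing.  A self-contained sketch of the same
compaction; `struct pv', the `keep' flag and malloc/free are
illustrative stand-ins for the kernel's pv_entry and pmap_pv_free,
not the patch's actual code:]

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

struct pv {
        struct pv       *next;
        bool             keep;  /* stands in for (pv_va & PV_KENTER) */
};

/*
 * 'first' is embedded in its owning structure and must never be
 * freed.  Keep every entry marked 'keep', free the rest, and leave
 * the survivors chained from the embedded head.
 */
static void
compact_pv_list(struct pv *first)
{
        struct pv *tail = NULL;         /* last survivor so far */

        for (struct pv *pv = first, *npv; pv != NULL; pv = npv) {
                npv = pv->next;
                pv->next = NULL;
                if (pv->keep) {
                        if (tail == NULL) {
                                /* First survivor must live in the
                                 * embedded head; copy a heap entry in. */
                                if (pv != first) {
                                        *first = *pv;
                                        free(pv);
                                }
                                tail = first;
                        } else {
                                tail->next = pv;
                                tail = pv;
                        }
                } else if (pv != first) {
                        free(pv);       /* discarded mapping */
                }
                /* A discarded 'first' is simply overwritten (or left
                 * empty) - the kernel marks it by clearing pv_pmap. */
        }
}
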
@@ -872,6 +990,12 @@
          */
         if (pv->pv_pmap != NULL) {
                 while (pv != NULL) {
+#ifdef PMAP_VIRTUAL_CACHE_ALIASES
+                        if (pv->pv_va & PV_KENTER) {
+                                pv = pv->pv_next;
+                                continue;
+                        }
+#endif
                         const pmap_t pmap = pv->pv_pmap;
                         va = trunc_page(pv->pv_va);
                         const uintptr_t gen =
@@ -894,30 +1018,7 @@
 
         /* remove_all */
         default:
-                /*
-                 * Do this first so that for each unmapping, pmap_remove_pv
-                 * won't try to sync the icache.
-                 */
-                pv = &mdpg->mdpg_first;
-                kpreempt_disable();
-                if (pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE)) {
-                        UVMHIST_LOG(pmapexechist, "pg %p (pa %#"PRIxPADDR
-                            "): execpage cleared", pg, VM_PAGE_TO_PHYS(pg),0,0);
-                        PMAP_COUNT(exec_uncached_page_protect);
-                }
-                VM_PAGEMD_PVLIST_READLOCK(mdpg);
-                pmap_pvlist_check(mdpg);
-                while (pv->pv_pmap != NULL) {
-                        const pmap_t pmap = pv->pv_pmap;
-                        va = trunc_page(pv->pv_va);
-                        VM_PAGEMD_PVLIST_UNLOCK(mdpg);
-                        pmap_remove(pmap, va, va + PAGE_SIZE);
-                        pmap_update(pmap);
-                        VM_PAGEMD_PVLIST_READLOCK(mdpg);
-                }
-                pmap_pvlist_check(mdpg);
-                VM_PAGEMD_PVLIST_UNLOCK(mdpg);
-                kpreempt_enable();
+                pmap_page_remove(pg);
         }
 
         UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
@@ -1077,13 +1178,20 @@
         const bool is_kernel_pmap_p = (pmap == pmap_kernel());
         u_int update_flags = (flags & VM_PROT_ALL) != 0 ? PMAP_TLB_INSERT : 0;
 #ifdef UVMHIST
-        struct kern_history * const histp = 
+        struct kern_history * const histp =
             ((prot & VM_PROT_EXECUTE) ? &pmapexechist : &pmaphist);
 #endif
 
         UVMHIST_FUNC(__func__); UVMHIST_CALLED(*histp);
 #define VM_PROT_STRING(prot) \
-        &"\0     (R)\0  (W)\0  (RW)\0 (X)\0  (RX)\0 (WX)\0 (RWX)\0"[UVM_PROTECTION(prot)*6]
+        &"\0     " \
+        "(R)\0  " \
+        "(W)\0  " \
+        "(RW)\0 " \
+        "(X)\0  " \
+        "(RX)\0 " \
+        "(WX)\0 " \
+        "(RWX)\0"[UVM_PROTECTION(prot)*6]
         UVMHIST_LOG(*histp, "(pmap=%p, va=%#"PRIxVADDR", pa=%#"PRIxPADDR,
             pmap, va, pa, 0);
         UVMHIST_LOG(*histp, "prot=%#x%s flags=%#x%s)",
@@ -1560,9 +1668,10 @@
                 vaddr_t va = trunc_page(pv->pv_va);
                 pv_next = pv->pv_next;
 
+#ifdef PMAP_VIRTUAL_CACHE_ALIASES
                 if (pv->pv_va & PV_KENTER)
                         continue;
-
+#endif
                 pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
                 KASSERT(ptep);
                 pt_entry_t pte = pte_prot_nowrite(*ptep);
@@ -1723,9 +1832,10 @@
                 pt_entry_t *ptep = pmap_pte_lookup(pmap, va);
                 pt_entry_t pte = (ptep != NULL) ? *ptep : 0;
                 if (!pte_valid_p(pte) || pte_to_paddr(pte) != pa)
-                        printf(
-                            "pmap_enter_pv: found va %#"PRIxVADDR" pa %#"PRIxPADDR" in pv_table but != %#"PRIxPTE"\n",
-                            va, pa, pte_value(pte));
+                        printf("%s: found va %#"PRIxVADDR
+                            " pa %#"PRIxPADDR
+                            " in pv_table but != %#"PRIxPTE"\n",
+                            __func__, va, pa, pte_value(pte));
 #endif
                 PMAP_COUNT(remappings);
                 VM_PAGEMD_PVLIST_UNLOCK(mdpg);
@@ -1804,7 +1914,7 @@
 
         UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
         UVMHIST_LOG(pmaphist,
-            "(pmap=%p, va=%#"PRIxVADDR", pg=%p (pa %#"PRIxPADDR")\n",
+            "(pmap=%p, va=%#"PRIxVADDR", pg=%p (pa %#"PRIxPADDR")",
             pmap, va, pg, VM_PAGE_TO_PHYS(pg));
         UVMHIST_LOG(pmaphist, "dirty=%s)",
             dirty ? "true" : "false", 0, 0, 0);
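
[Reviewer note: the reflowed VM_PROT_STRING above only works because
every entry in the packed string is padded to exactly 6 bytes (text,
NUL, then spaces), so UVM_PROTECTION(prot) * 6 always lands on the
start of a valid NUL-terminated entry; the multi-line form just makes
that padding visible.  A small standalone demonstration of the
fixed-stride table trick, assuming the usual R=1/W=2/X=4 encoding;
PROT_STRING is a made-up name, not the kernel macro:]

#include <stdio.h>

/*
 * Each entry occupies exactly 6 bytes including its terminating NUL,
 * so indexing the flat string by prot * 6 yields the entry's start.
 */
#define PROT_STRING(prot) \
        (&"\0     " \
          "(R)\0  " \
          "(W)\0  " \
          "(RW)\0 " \
          "(X)\0  " \
          "(RX)\0 " \
          "(WX)\0 " \
          "(RWX)\0"[(prot) * 6])

int
main(void)
{
        for (int prot = 0; prot < 8; prot++)
                printf("prot %d -> \"%s\"\n", prot, PROT_STRING(prot));
        return 0;
}
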