diff --git a/sys/uvm/pmap/pmap.c b/sys/uvm/pmap/pmap.c
index f8bd023..fe1408b 100644
--- a/sys/uvm/pmap/pmap.c
+++ b/sys/uvm/pmap/pmap.c
@@ -189,9 +189,31 @@ PMAP_COUNTER(page_protect, "page_protects");
 #define PMAP_ASID_RESERVED 0
 CTASSERT(PMAP_ASID_RESERVED == 0);
 
+#ifdef PMAP_HWPAGEWALKER
+#ifndef PMAP_PDETAB_ALIGN
+#define PMAP_PDETAB_ALIGN	/* nothing */
+#endif
+
+#ifdef _LP64
+pmap_pdetab_t	pmap_kstart_pdetab PMAP_PDETAB_ALIGN; /* first mid-level pdetab for kernel */
+#endif
+pmap_pdetab_t	pmap_kern_pdetab PMAP_PDETAB_ALIGN = { /* top level pdetab for kernel */
+#ifdef _LP64
+	/* TODO: This seems wrong.  Index out of bounds on RISCV at least.
+	.pde_pde[(VM_MIN_KERNEL_ADDRESS & XSEGOFSET) >> SEGSHIFT] =
+	    &pmap_kstart_pdetab,
+	*/
+	0	/* Just zero it for now until the above is fixed */
+#endif
+};
+
+#endif
+
+#if !defined(PMAP_HWPAGEWALKER) || !defined(POOL_PHYSTOV)
 #ifndef PMAP_SEGTAB_ALIGN
 #define PMAP_SEGTAB_ALIGN	/* nothing */
 #endif
+
 #ifdef _LP64
 pmap_segtab_t	pmap_kstart_segtab PMAP_SEGTAB_ALIGN; /* first mid-level segtab for kernel */
 #endif
@@ -200,11 +222,18 @@ pmap_segtab_t pmap_kern_segtab PMAP_SEGTAB_ALIGN = { /* top level segtab for ker
 	.seg_seg[(VM_MIN_KERNEL_ADDRESS & XSEGOFSET) >> SEGSHIFT] = &pmap_kstart_segtab,
 #endif
 };
+#endif
 
 struct pmap_kernel kernel_pmap_store = {
 	.kernel_pmap = {
 		.pm_count = 1,
-		.pm_segtab = &pmap_kern_segtab,
+#ifdef PMAP_HWPAGEWALKER
+		.pm_pdetab = PMAP_INVALID_PDETAB_ADDRESS,
+#endif
+#if !defined(PMAP_HWPAGEWALKER) || !defined(POOL_PHYSTOV)
+		.pm_segtab = PMAP_INVALID_SEGTAB_ADDRESS,
+		//.pm_segtab = &pmap_kern_segtab,
+#endif
 		.pm_minaddr = VM_MIN_KERNEL_ADDRESS,
 		.pm_maxaddr = VM_MAX_KERNEL_ADDRESS,
 	},
@@ -434,7 +463,7 @@ pmap_growkernel(vaddr_t maxkvaddr)
  * memory system has been bootstrapped.  After that point, either kmem_alloc
  * or malloc should be used.  This function works by stealing pages from the
  * (to be) managed page pool, then implicitly mapping the pages (by using
- * their k0seg addresses) and zeroing them.
+ * their direct mapped addresses) and zeroing them.
  *
  * It may be used once the physical memory segments have been pre-loaded
  * into the vm_physmem[] array.  Early memory allocation MUST use this
@@ -588,6 +617,25 @@ pmap_create(void)
 	pmap->pm_minaddr = VM_MIN_ADDRESS;
 	pmap->pm_maxaddr = VM_MAXUSER_ADDRESS;
 
+//	pmap->pm_uobject.vmobjlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_VM);
+
+
+	mutex_init(&pmap->pm_obj_lock, MUTEX_DEFAULT, IPL_VM);
+	uvm_obj_init(&pmap->pm_uobject, NULL, false, 1);
+	uvm_obj_setlock(&pmap->pm_uobject, &pmap->pm_obj_lock);
+
+
+//	TAILQ_INIT(&pmap->pm_pvp_list);
+	TAILQ_INIT(&pmap->pm_ptp_list);
+#ifdef _LP64
+#if defined(PMAP_HWPAGEWALKER)
+	TAILQ_INIT(&pmap->pm_pdetab_list);
+#endif
+#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_POOLPAGE)
+	TAILQ_INIT(&pmap->pm_segtab_list);
+#endif
+#endif
+
 	pmap_segtab_init(pmap);
 
 #ifdef MULTIPROCESSOR
@@ -625,9 +673,24 @@ pmap_destroy(pmap_t pmap)
 	kpreempt_disable();
 	pmap_md_tlb_miss_lock_enter();
 	pmap_tlb_asid_release_all(pmap);
+	pmap_segtab_destroy(pmap, NULL, 0);
 	pmap_md_tlb_miss_lock_exit();
 
+//	KASSERT(TAILQ_EMPTY(&pmap->pm_pvp_list));
+	KASSERT(TAILQ_EMPTY(&pmap->pm_ptp_list));
+#ifdef _LP64
+#if defined(PMAP_HWPAGEWALKER)
+	KASSERT(TAILQ_EMPTY(&pmap->pm_pdetab_list));
+#endif
+#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_POOLPAGE)
+	KASSERT(TAILQ_EMPTY(&pmap->pm_segtab_list));
+#endif
+#endif
+	KASSERT(pmap->pm_uobject.uo_npages == 0);
+
+	mutex_obj_free(pmap->pm_uobject.vmobjlock);
+
 #ifdef MULTIPROCESSOR
 	kcpuset_destroy(pmap->pm_active);
 	kcpuset_destroy(pmap->pm_onproc);
@@ -823,10 +886,12 @@ pmap_deactivate(struct lwp *l)
 	kpreempt_disable();
 	KASSERT(l == curlwp || l->l_cpu == curlwp->l_cpu);
 	pmap_md_tlb_miss_lock_enter();
+#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_POOLPAGE)
 	curcpu()->ci_pmap_user_segtab = PMAP_INVALID_SEGTAB_ADDRESS;
 #ifdef _LP64
 	curcpu()->ci_pmap_user_seg0tab = NULL;
 #endif
+#endif
 	pmap_tlb_asid_deactivate(pmap);
 	pmap_md_tlb_miss_lock_exit();
 	kpreempt_enable();
@@ -891,7 +956,7 @@ pmap_pte_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
 
 	KASSERT(kpreempt_disabled());
 
-	for (; sva < eva; sva += NBPG, ptep++) {
+	for (; sva < eva; sva += NBPG, ptep = pmap_md_nptep(ptep)) {
 		const pt_entry_t pte = *ptep;
 		if (!pte_valid_p(pte))
 			continue;
@@ -1037,7 +1102,8 @@ pmap_pte_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
 	/*
 	 * Change protection on every valid mapping within this segment.
 	 */
-	for (; sva < eva; sva += NBPG, ptep++) {
+
+	for (; sva < eva; sva += NBPG, ptep = pmap_md_nptep(ptep)) {
 		pt_entry_t pte = *ptep;
 		if (!pte_valid_p(pte))
 			continue;
@@ -1345,7 +1411,8 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
 	pt_entry_t npte = pte_make_kenter_pa(pa, mdpg, prot, flags);
 
 	kpreempt_disable();
-	pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
+	pt_entry_t * const ptep = pmap_pte_reserve(pmap, va, 0);
+	KASSERTMSG(ptep != NULL, "%#"PRIxVADDR " %#"PRIxVADDR, va, pmap_limits.virtual_end);
 
 	KASSERT(!pte_valid_p(*ptep));
 
@@ -1402,7 +1469,7 @@ pmap_pte_kremove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
 
 	KASSERT(kpreempt_disabled());
 
-	for (; sva < eva; sva += NBPG, ptep++) {
+	for (; sva < eva; sva += NBPG, ptep = pmap_md_nptep(ptep)) {
 		pt_entry_t pte = *ptep;
 		if (!pte_valid_p(pte))
 			continue;
@@ -1469,6 +1536,8 @@ pmap_remove_all(struct pmap *pmap)
 	pmap_md_tlb_miss_lock_exit();
 	pmap->pm_flags |= PMAP_DEFERRED_ACTIVATE;
 
+	pmap_segtab_remove_all(pmap);
+
 #ifdef PMAP_FAULTINFO
 	curpcb->pcb_faultinfo.pfi_faultaddr = 0;
 	curpcb->pcb_faultinfo.pfi_repeats = 0;
@@ -1536,6 +1605,10 @@ pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
 	paddr_t pa;
 
 	if (pmap == pmap_kernel()) {
+		if (pmap_md_kernel_vaddr_p(va)) {
+			pa = pmap_md_kernel_vaddr_to_paddr(va);
+			goto done;
+		}
 		if (pmap_md_direct_mapped_vaddr_p(va)) {
 			pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
 			goto done;
diff --git a/sys/uvm/pmap/pmap.h b/sys/uvm/pmap/pmap.h
index b9eb1f9..8552015 100644
--- a/sys/uvm/pmap/pmap.h
+++ b/sys/uvm/pmap/pmap.h
@@ -74,6 +74,7 @@
 #ifndef _UVM_PMAP_PMAP_H_
 #define _UVM_PMAP_PMAP_H_
 
+#include <uvm/uvm_object.h>
 #include <uvm/uvm_stat.h>
 #ifdef UVMHIST
 UVMHIST_DECL(pmapexechist);
@@ -92,12 +93,33 @@ UVMHIST_DECL(pmaphist);
 #define pmap_round_seg(x)	(((vaddr_t)(x) + SEGOFSET) & ~SEGOFSET)
 
 /*
- * Each seg_tab point an array of pt_entry [NPTEPG]
+ * Each ptpage maps a "segment" worth of address space.  That is
+ * NPTEPG * PAGE_SIZE.
  */
+
+typedef struct {
+	pt_entry_t ptp_ptes[NPTEPG];
+} pmap_ptpage_t;
+
+#if defined(PMAP_HWPAGEWALKER)
+typedef union pmap_pdetab {
+	pd_entry_t pde_pde[PMAP_PDETABSIZE];
+	union pmap_pdetab *pde_next;
+} pmap_pdetab_t;
+#endif
+#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_POOLPAGE)
 typedef union pmap_segtab {
+#ifdef _LP64
 	union pmap_segtab *seg_seg[PMAP_SEGTABSIZE];
-	pt_entry_t *seg_tab[PMAP_SEGTABSIZE];
+#endif
+	pmap_ptpage_t *seg_tab[PMAP_SEGTABSIZE];
+#ifdef PMAP_HWPAGEWALKER
+	pd_entry_t seg_pde[PMAP_PDETABSIZE];
+#endif
+	union pmap_segtab *seg_next;
 } pmap_segtab_t;
+#endif
+
 
 #ifdef _KERNEL
 struct pmap;
@@ -110,6 +132,12 @@ void pmap_pte_process(struct pmap *, vaddr_t, vaddr_t, pte_callback_t,
 void pmap_segtab_activate(struct pmap *, struct lwp *);
 void pmap_segtab_init(struct pmap *);
 void pmap_segtab_destroy(struct pmap *, pte_callback_t, uintptr_t);
+//void pmap_segtab_destroy(struct pmap *);
+void pmap_segtab_remove_all(struct pmap *);
+#ifdef PMAP_HWPAGEWALKER
+pd_entry_t *pmap_pde_lookup(struct pmap *, vaddr_t, paddr_t *);
+bool pmap_pdetab_fixup(struct pmap *, vaddr_t);
+#endif
 extern kmutex_t pmap_segtab_lock;
 #endif /* _KERNEL */
 
@@ -122,13 +150,33 @@ extern kmutex_t pmap_segtab_lock;
  * Machine dependent pmap structure.
  */
 struct pmap {
+	struct uvm_object pm_uobject;
+#define pm_lock pm_uobject.vmobjlock
+#define pm_count pm_uobject.uo_refs	/* pmap reference count */
+#define pm_pvp_list pm_uobject.memq
+
+	kmutex_t pm_obj_lock;
+	struct pglist pm_ptp_list;
+#ifdef _LP64
+#if defined(PMAP_HWPAGEWALKER)
+	struct pglist pm_pdetab_list;
+#else
+	struct pglist pm_segtab_list;
+#endif
+#endif /* _LP64 */
 #ifdef MULTIPROCESSOR
 	kcpuset_t *pm_active;		/* pmap was active on ... */
 	kcpuset_t *pm_onproc;		/* pmap is active on ... */
 	volatile u_int pm_shootdown_pending;
 #endif
+#if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_POOLPAGE)
+	pmap_pdetab_t *pm_pdetab;	/* pointer to HW PDEs */
+#elif defined(PMAP_HWPAGEWALKER)
+	pmap_pdetab_t *pm_pdetab;	/* pointer to HW PDEs */
+	pmap_segtab_t *pm_segtab;	/* virtual shadow of HW PDEs */
+#else
 	pmap_segtab_t *pm_segtab;	/* pointers to pages of PTEs */
-	u_int pm_count;			/* pmap reference count */
+#endif
 	u_int pm_flags;
 #define PMAP_DEFERRED_ACTIVATE	__BIT(0)
 	struct pmap_statistics pm_stats; /* pmap statistics */
@@ -176,7 +224,11 @@ extern struct pmap_limits pmap_limits;
 
 extern u_int pmap_page_colormask;
 
+#if defined(PMAP_HWPAGEWALKER)
+extern pmap_pdetab_t pmap_kern_pdetab;
+#else
 extern pmap_segtab_t pmap_kern_segtab;
+#endif
 
 #define pmap_wired_count(pmap)	((pmap)->pm_stats.wired_count)
 #define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
diff --git a/sys/uvm/pmap/pmap_segtab.c b/sys/uvm/pmap/pmap_segtab.c
index 72d3659..e33fbf3 100644
--- a/sys/uvm/pmap/pmap_segtab.c
+++ b/sys/uvm/pmap/pmap_segtab.c
@@ -106,10 +106,59 @@ __KERNEL_RCSID(0, "$NetBSD: pmap_segtab.c,v 1.7 2019/03/08 08:12:40 msaitoh Exp
 #include 
 #include 
+#include 
 
-CTASSERT(NBPG >= sizeof(pmap_segtab_t));
+#if defined(XSEGSHIFT) && XSEGSHIFT == SEGSHIFT
+#undef XSEGSHIFT
+#undef XSEGLENGTH
+#undef NBXSEG
+#undef NXSEGPG
+#endif
 
-struct pmap_segtab_info {
+#define MULT_CTASSERT(a,b)	__CTASSERT((a) < (b) || ((a) % (b) == 0))
+
+__CTASSERT(sizeof(pmap_ptpage_t) == NBPG);
+
+#if defined(PMAP_HWPAGEWALKER)
+#ifdef _LP64
+MULT_CTASSERT(PMAP_PDETABSIZE, NPDEPG);
+MULT_CTASSERT(NPDEPG, PMAP_PDETABSIZE);
+MULT_CTASSERT(PMAP_PDETABSIZE, NPDEPG);
+#endif /* _LP64 */
+MULT_CTASSERT(sizeof(pmap_pdetab_t *), sizeof(pd_entry_t));
+MULT_CTASSERT(sizeof(pd_entry_t), sizeof(pmap_pdetab_t));
+
+#if 0
+#ifdef _LP64
+static const bool separate_pdetab_root_p = NPDEPG != PMAP_PDETABSIZE;
+#else
+static const bool separate_pdetab_root_p = true;
+#endif /* _LP64 */
+#endif
+
+typedef struct {
+	pmap_pdetab_t *free_pdetab0;	/* free list kept locally */
+	pmap_pdetab_t *free_pdetab;	/* free list kept locally */
+#ifdef DEBUG
+	uint32_t nget;
+	uint32_t nput;
+	uint32_t npage;
+#define	PDETAB_ADD(n, v)	(pmap_segtab_info.pdealloc.n += (v))
+#else
+#define	PDETAB_ADD(n, v)	((void) 0)
+#endif /* DEBUG */
+} pmap_pdetab_alloc_t;
+#endif /* PMAP_HWPAGEWALKER */
+
+#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_POOLPAGE)
+#ifdef _LP64
+__CTASSERT(NSEGPG >= PMAP_SEGTABSIZE);
+__CTASSERT(NSEGPG % PMAP_SEGTABSIZE == 0);
+#endif
+__CTASSERT(NBPG >= sizeof(pmap_segtab_t));
+
+typedef struct {
+	pmap_segtab_t *free_segtab0;	/* free list kept locally */
 	pmap_segtab_t *free_segtab;	/* free list kept locally */
 #ifdef DEBUG
 	uint32_t nget_segtab;
@@ -119,6 +168,16 @@ struct pmap_segtab_info {
 #else
 #define	SEGTAB_ADD(n, v)	((void) 0)
 #endif
+} pmap_segtab_alloc_t;
+#endif /* !PMAP_HWPAGEWALKER || !PMAP_MAP_POOLPAGE */
+
+struct pmap_segtab_info {
+#if defined(PMAP_HWPAGEWALKER)
+	pmap_pdetab_alloc_t pdealloc;
+#endif
+#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_POOLPAGE)
+	pmap_segtab_alloc_t segalloc;
+#endif
 #ifdef PMAP_PTP_CACHE
 	struct pgflist ptp_pgflist;	/* Keep a list of idle page tables. */
 #endif
@@ -130,6 +189,7 @@ struct pmap_segtab_info {
 
 kmutex_t pmap_segtab_lock __cacheline_aligned;
 
+#ifndef PMAP_HWPAGEWALKER
 static void
 pmap_check_stp(pmap_segtab_t *stp, const char *caller, const char *why)
 {
@@ -147,6 +207,7 @@ pmap_check_stp(pmap_segtab_t *stp, const char *caller, const char *why)
 	}
 #endif
 }
+#endif /* PMAP_HWPAGEWALKER */
 
 static inline struct vm_page *
 pmap_pte_pagealloc(void)
@@ -164,6 +225,182 @@ pmap_pte_pagealloc(void)
 	return pg;
 }
 
+#if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_POOLPAGE)
+static vaddr_t
+pmap_pde_to_va(pd_entry_t pde)
+{
+	if (!pte_pde_valid_p(pde))
+		return 0;
+
+	paddr_t pa = pte_pde_to_paddr(pde);
+	return pmap_md_direct_map_paddr(pa);
+}
+
+#ifdef _LP64
+static pmap_pdetab_t *
+pmap_pde_to_pdetab(pd_entry_t pde)
+{
+	return (pmap_pdetab_t *) pmap_pde_to_va(pde);
+}
+#endif
+
+static pmap_ptpage_t *
+pmap_pde_to_ptpage(pd_entry_t pde)
+{
+	return (pmap_ptpage_t *) pmap_pde_to_va(pde);
+}
+#endif
+
+#ifdef _LP64
+__CTASSERT((XSEGSHIFT - SEGSHIFT) % (PGSHIFT-3) == 0);
+#endif
+
+static inline pmap_ptpage_t *
+pmap_ptpage(struct pmap *pmap, vaddr_t va)
+{
+#if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_POOLPAGE)
+	vaddr_t pdetab_mask = PMAP_PDETABSIZE - 1;
+	pmap_pdetab_t *ptb = pmap->pm_pdetab;
+
+	KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va));
+
+#ifdef _LP64
+	for (size_t segshift = XSEGSHIFT;
+	     segshift > SEGSHIFT;
+	     segshift -= PGSHIFT - 3, pdetab_mask = NSEGPG - 1) {
+		ptb = pmap_pde_to_pdetab(ptb->pde_pde[(va >> segshift) & pdetab_mask]);
+		if (ptb == NULL)
+			return NULL;
+	}
+#endif
+	return pmap_pde_to_ptpage(ptb->pde_pde[(va >> SEGSHIFT) & pdetab_mask]);
+#else
+	vaddr_t segtab_mask = PMAP_SEGTABSIZE - 1;
+	pmap_segtab_t *stb = pmap->pm_segtab;
+
+	KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va));
+
+#ifdef _LP64
+	for (size_t segshift = XSEGSHIFT;
+	     segshift > SEGSHIFT;
+	     segshift -= PGSHIFT - 3, segtab_mask = NSEGPG - 1) {
+		stb = stb->seg_seg[(va >> segshift) & segtab_mask];
+		if (stb == NULL)
+			return NULL;
+	}
+#endif
+	return stb->seg_tab[(va >> SEGSHIFT) & segtab_mask];
+#endif
+}
+
+#if defined(PMAP_HWPAGEWALKER)
+bool
+pmap_pdetab_fixup(struct pmap *pmap, vaddr_t va)
+{
+	struct pmap * const kpm = pmap_kernel();
+	pmap_pdetab_t * const kptb = kpm->pm_pdetab;
+	pmap_pdetab_t * const uptb = pmap->pm_pdetab;
+	size_t idx = PMAP_PDETABSIZE - 1;
+#if !defined(PMAP_MAP_POOLPAGE)
+	__CTASSERT(PMAP_PDETABSIZE == PMAP_SEGTABSIZE);
+	pmap_segtab_t * const kstb = &pmap_kern_segtab;
+	pmap_segtab_t * const ustb = pmap->pm_segtab;
+#endif
+
+	// Regardless of how many levels deep this page table is, we only
+	// need to verify the first level PDEs match up.
+#ifdef XSEGSHIFT
+	idx &= va >> XSEGSHIFT;
+#else
+	idx &= va >> SEGSHIFT;
+#endif
+	if (uptb->pde_pde[idx] != kptb->pde_pde[idx]) {
+		pte_pde_set(&uptb->pde_pde[idx], kptb->pde_pde[idx]);
+#if !defined(PMAP_MAP_POOLPAGE)
+		ustb->seg_seg[idx] = kstb->seg_seg[idx];	// copy KVA of PTP
+#endif
+		return true;
+	}
+	return false;
+}
+#endif /* PMAP_HWPAGEWALKER */
+
+
+static void
+pmap_page_attach(pmap_t pmap, vaddr_t kva, struct vm_page *pg,
+    struct pglist *pglist, voff_t off)
+{
+	struct uvm_object * const uobj = &pmap->pm_uobject;
+	if (pg == NULL) {
+		paddr_t pa;
+
+		bool ok = pmap_extract(pmap_kernel(), kva, &pa);
+		KASSERT(ok);
+
+		pg = PHYS_TO_VM_PAGE(pa);
+		KASSERT(pg != NULL);
+	}
+
+	mutex_spin_enter(uobj->vmobjlock);
+	TAILQ_INSERT_TAIL(pglist, pg, listq.queue);
+	uobj->uo_npages++;
+	mutex_spin_exit(uobj->vmobjlock);
+
+	/*
+	 * Now set each vm_page that maps this page to point to the
+	 * pmap and set the offset to what we want.
+	 */
+	KASSERT(pg->uobject == NULL);
+	pg->uobject = uobj;
+	pg->offset = off;
+}
+
+static struct vm_page *
+pmap_segtab_pageclean(pmap_t pmap, struct pglist *list, vaddr_t va)
+{
+	struct uvm_object * const uobj = &pmap->pm_uobject;
+
+	paddr_t pa;
+
+	bool ok = pmap_extract(pmap_kernel(), va, &pa);
+	KASSERT(ok);
+
+//	const paddr_t pa = kvtophys(va);
+	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
+
+	KASSERT(pg->uobject == uobj);
+
+	mutex_spin_enter(uobj->vmobjlock);
+	TAILQ_REMOVE(list, pg, listq.queue);
+	uobj->uo_npages--;
+	mutex_spin_exit(uobj->vmobjlock);
+
+	pg->uobject = NULL;
+	pg->offset = 0;
+
+	return pg;
+}
+
+#ifndef PMAP_PTP_CACHE
+static void
+pmap_segtab_pagefree(pmap_t pmap, struct pglist *list, vaddr_t kva, size_t size)
+{
+#ifdef POOL_VTOPHYS
+	if (size == PAGE_SIZE) {
+		uvm_pagefree(pmap_segtab_pageclean(pmap, list, kva));
+		return;
+	}
+#endif
+	for (size_t i = 0; i < size; i += PAGE_SIZE) {
+		(void)pmap_segtab_pageclean(pmap, list, kva + i);
+	}
+
+	uvm_km_free(kernel_map, kva, size, UVM_KMF_WIRED);
+}
+#endif
+
+#if 0
+#ifndef PMAP_HWPAGEWALKER
 static inline pt_entry_t *
 pmap_segmap(struct pmap *pmap, vaddr_t va)
 {
@@ -178,92 +415,161 @@ pmap_segmap(struct pmap *pmap, vaddr_t va)
 	return stp->seg_tab[(va >> SEGSHIFT) & (PMAP_SEGTABSIZE - 1)];
 }
+#endif /* PMAP_HWPAGEWALKER */
+#endif
 
 pt_entry_t *
 pmap_pte_lookup(pmap_t pmap, vaddr_t va)
 {
-	pt_entry_t *pte = pmap_segmap(pmap, va);
-	if (pte == NULL)
+	pmap_ptpage_t * const ptp = pmap_ptpage(pmap, va);
+	if (ptp == NULL)
 		return NULL;
 
-	return pte + ((va >> PGSHIFT) & (NPTEPG - 1));
+	const size_t pte_idx = pte_index(va);
+
+	return ptp->ptp_ptes + pte_idx;
 }
 
+#ifdef _LP64
+#if defined(PMAP_HWPAGEWALKER)
 static void
-pmap_segtab_free(pmap_segtab_t *stp)
+pmap_pdetab_free(pmap_pdetab_t *ptb)
 {
 	/*
-	 * Insert the segtab into the segtab freelist.
+	 * Insert the pdetab into the pdetab freelist.
 	 */
 	mutex_spin_enter(&pmap_segtab_lock);
-	stp->seg_seg[0] = pmap_segtab_info.free_segtab;
-	pmap_segtab_info.free_segtab = stp;
-	SEGTAB_ADD(nput, 1);
+	ptb->pde_next = pmap_segtab_info.pdealloc.free_pdetab;
+	pmap_segtab_info.pdealloc.free_pdetab = ptb;
+	PDETAB_ADD(nput, 1);
 	mutex_spin_exit(&pmap_segtab_lock);
+
 }
+#endif
+#endif
 
-static void
-pmap_segtab_release(pmap_t pmap, pmap_segtab_t **stp_p, bool free_stp,
-	pte_callback_t callback, uintptr_t flags,
-	vaddr_t va, vsize_t vinc)
-{
-	pmap_segtab_t *stp = *stp_p;
-	for (size_t i = (va / vinc) & (PMAP_SEGTABSIZE - 1);
-	     i < PMAP_SEGTABSIZE;
-	     i++, va += vinc) {
-#ifdef _LP64
-		if (vinc > NBSEG) {
-			if (stp->seg_seg[i] != NULL) {
-				pmap_segtab_release(pmap, &stp->seg_seg[i],
-				    true, callback, flags, va, vinc / NSEGPG);
-				KASSERT(stp->seg_seg[i] == NULL);
-			}
-			continue;
+static pmap_ptpage_t *
+pmap_ptpage_alloc(pmap_t pmap, int flags, paddr_t *pa_p)
+{
+#ifdef PMAP_MAP_POOLPAGE
+	struct vm_page *pg = NULL;
+	pmap_ptpage_t *ptp = NULL;
+	paddr_t pa;
+#ifdef PMAP_PTP_CACHE
+	ptp = pmap_pgcache_alloc(&pmap_segtab_info.ptp_flist);
+#endif
+	if (ptp == NULL) {
+		pg = pmap_pte_pagealloc();
+		if (pg == NULL) {
+			if (flags & PMAP_CANFAIL)
+				return NULL;
+			panic("%s: cannot allocate page table page ",
+			    __func__);
 		}
+		pa = VM_PAGE_TO_PHYS(pg);
+		ptp = (pmap_ptpage_t *)PMAP_MAP_POOLPAGE(pa);
+	} else {
+		bool ok = pmap_extract(pmap_kernel(), (vaddr_t)ptp, &pa);
+		KASSERT(ok);
+	}
+
+	pmap_page_attach(pmap, (vaddr_t)ptp, pg, &pmap->pm_ptp_list, 0);
+
+	*pa_p = pa;
+	return ptp;
+#else
+	vaddr_t kva = uvm_km_alloc(kernel_map, PAGE_SIZE, PAGE_SIZE,
+	    UVM_KMF_WIRED|UVM_KMF_WAITVA
+	    |(flags & PMAP_CANFAIL ? UVM_KMF_CANFAIL : 0));
+	if (kva == 0) {
+		if (flags & PMAP_CANFAIL)
+			return NULL;
+		panic("%s: cannot allocate page table page", __func__);
+	}
+	pmap_page_attach(pmap, kva, NULL, &pmap->pm_ptp_list, 0);
+	return (pmap_ptpage_t *)kva;
 #endif
-		KASSERT(vinc == NBSEG);
+}
 
-		/* get pointer to segment map */
-		pt_entry_t *pte = stp->seg_tab[i];
-		if (pte == NULL)
-			continue;
+static void
+pmap_ptpage_free(pmap_t pmap, pmap_ptpage_t *ptp)
+{
+	const vaddr_t kva = (vaddr_t) ptp;
 
-		/*
-		 * If our caller want a callback, do so.
-		 */
-		if (callback != NULL) {
-			(*callback)(pmap, va, va + vinc, pte, flags);
-		}
 #ifdef DEBUG
-		for (size_t j = 0; j < NPTEPG; j++) {
-			if (!pte_zero_p(pte[j]))
-				panic("%s: pte entry %p not 0 (%#"PRIxPTE")",
-				    __func__, &pte[j], pte_value(pte[j]));
-		}
+	for (size_t j = 0; j < NPTEPG; j++) {
+		if (ptp->ptp_ptes[j])
+			panic("%s: pte entry %p not 0 (%#"PRIxPTE")",
+			    __func__, &ptp->ptp_ptes[j],
+			    pte_value(ptp->ptp_ptes[j]));
+	}
 #endif
-		// PMAP_UNMAP_POOLPAGE should handle any VCA issues itself
-		paddr_t pa = PMAP_UNMAP_POOLPAGE((vaddr_t)pte);
-		struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
+	//pmap_md_vca_clean(pg, (vaddr_t)ptp, NBPG);
 #ifdef PMAP_PTP_CACHE
-		mutex_spin_enter(&pmap_segtab_lock);
-		LIST_INSERT_HEAD(&pmap_segtab_info.ptp_pgflist, pg, listq.list);
-		mutex_spin_exit(&pmap_segtab_lock);
+	pmap_segtab_pageclean(pmap, &pmap->pm_ptp_list, kva);
+	pmap_segtab_pagecache(&pmap_segtab_info.ptp_flist, ptp);
 #else
-		uvm_pagefree(pg);
+	pmap_segtab_pagefree(pmap, &pmap->pm_ptp_list, kva, PAGE_SIZE);
+#endif /* PMAP_PTP_CACHE */
+}
+
+
+#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_POOLPAGE)
+static void
+pmap_segtab_free(pmap_segtab_t *stp)
+{
+	/*
+	 * Insert the segtab into the segtab freelist.
+	 */
+	mutex_spin_enter(&pmap_segtab_lock);
+	stp->seg_next = pmap_segtab_info.segalloc.free_segtab;
+	pmap_segtab_info.segalloc.free_segtab = stp;
+	SEGTAB_ADD(nput, 1);
+	mutex_spin_exit(&pmap_segtab_lock);
+}
 #endif
 
-		stp->seg_tab[i] = NULL;
+#if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_POOLPAGE)
+static pmap_pdetab_t *
+pmap_pdetab_alloc(void)
+{
+	pmap_pdetab_t *ptb;
+
+ again:
+	mutex_spin_enter(&pmap_segtab_lock);
+	if (__predict_true((ptb = pmap_segtab_info.pdealloc.free_pdetab) != NULL)) {
+		pmap_segtab_info.pdealloc.free_pdetab = ptb->pde_next;
+		PDETAB_ADD(nget, 1);
+		ptb->pde_next = NULL;
 	}
+	mutex_spin_exit(&pmap_segtab_lock);
 
-	if (free_stp) {
-		pmap_check_stp(stp, __func__,
-		    vinc == NBSEG ? "release seg" : "release xseg");
-		pmap_segtab_free(stp);
-		*stp_p = NULL;
+	if (__predict_false(ptb == NULL)) {
+		struct vm_page * const ptb_pg = pmap_pte_pagealloc();
+
+		if (__predict_false(ptb_pg == NULL)) {
+			/*
+			 * XXX What else can we do?  Could we deadlock here?
+			 */
+			uvm_wait(__func__);
+			goto again;
+		}
+
+		PDETAB_ADD(npage, 1);
+		const paddr_t ptb_pa = VM_PAGE_TO_PHYS(ptb_pg);
+		ptb = (pmap_pdetab_t *)PMAP_MAP_POOLPAGE(ptb_pa);
+
+		if (pte_invalid_pde() != 0) {
+			for (size_t i = 0; i < NPDEPG; i++) {
+				ptb->pde_pde[i] = pte_invalid_pde();
+			}
+		}
 	}
-}
+	return ptb;
+}
+#else
 
 /*
  * Create and return a physical map.
 *
@@ -284,10 +590,10 @@ pmap_segtab_alloc(void)
 
  again:
 	mutex_spin_enter(&pmap_segtab_lock);
-	if (__predict_true((stp = pmap_segtab_info.free_segtab) != NULL)) {
-		pmap_segtab_info.free_segtab = stp->seg_seg[0];
-		stp->seg_seg[0] = NULL;
+	if (__predict_true((stp = pmap_segtab_info.segalloc.free_segtab) != NULL)) {
+		pmap_segtab_info.segalloc.free_segtab = stp->seg_seg[0];
 		SEGTAB_ADD(nget, 1);
+		stp->seg_seg[0] = NULL;
 		found_on_freelist = true;
 	}
 	mutex_spin_exit(&pmap_segtab_lock);
@@ -312,14 +618,14 @@ pmap_segtab_alloc(void)
 		 * link all the segtabs in this page together
 		 */
 		for (size_t i = 1; i < n - 1; i++) {
-			stp[i].seg_seg[0] = &stp[i+1];
+			stp[i].seg_seg[0] = &stp[i + 1];
 		}
 		/*
 		 * Now link the new segtabs into the free segtab list.
 		 */
 		mutex_spin_enter(&pmap_segtab_lock);
-		stp[n-1].seg_seg[0] = pmap_segtab_info.free_segtab;
-		pmap_segtab_info.free_segtab = stp + 1;
+		stp[n - 1].seg_seg[0] = pmap_segtab_info.segalloc.free_segtab;
+		pmap_segtab_info.segalloc.free_segtab = stp + 1;
 		SEGTAB_ADD(nput, n - 1);
 		mutex_spin_exit(&pmap_segtab_lock);
 	}
@@ -330,6 +636,72 @@ pmap_segtab_alloc(void)
 
 	return stp;
 }
+#endif
+
+
+#ifndef PMAP_HWPAGEWALKER
+static void
+pmap_segtab_release(pmap_t pmap, pmap_segtab_t **stp_p, bool free_stp,
+	pte_callback_t callback, uintptr_t flags,
+	vaddr_t va, vsize_t vinc)
+{
+	pmap_segtab_t *stp = *stp_p;
+
+	for (size_t i = (va / vinc) & (PMAP_SEGTABSIZE - 1);
+	     i < PMAP_SEGTABSIZE;
+	     i++, va += vinc) {
+#ifdef _LP64
+		if (vinc > NBSEG) {
+			if (stp->seg_seg[i] != NULL) {
+				pmap_segtab_release(pmap, &stp->seg_seg[i],
+				    true, callback, flags, va, vinc / NSEGPG);
+				KASSERT(stp->seg_seg[i] == NULL);
+			}
+			continue;
+		}
+#endif
+		KASSERT(vinc == NBSEG);
+
+		/* get pointer to segment map */
+		pt_entry_t *pte = stp->seg_tab[i]->ptp_ptes;
+		if (pte == NULL)
+			continue;
+
+		/*
+		 * If our caller wants a callback, do so.
+		 */
+		if (callback != NULL) {
+			(*callback)(pmap, va, va + vinc, pte, flags);
+		}
+#ifdef DEBUG
+		for (size_t j = 0; j < NPTEPG; j++) {
+			if (!pte_zero_p(pte[j]))
+				panic("%s: pte entry %p not 0 (%#"PRIxPTE")",
+				    __func__, &pte[j], pte_value(pte[j]));
+		}
+#endif
+		// PMAP_UNMAP_POOLPAGE should handle any VCA issues itself
+		paddr_t pa = PMAP_UNMAP_POOLPAGE((vaddr_t)pte);
+		struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
+#ifdef PMAP_PTP_CACHE
+		mutex_spin_enter(&pmap_segtab_lock);
+		LIST_INSERT_HEAD(&pmap_segtab_info.ptp_pgflist, pg, listq.list);
+		mutex_spin_exit(&pmap_segtab_lock);
+#else
+		uvm_pagefree(pg);
+#endif
+
+		stp->seg_tab[i] = NULL;
+	}
+
+	if (free_stp) {
+		pmap_check_stp(stp, __func__,
+		    vinc == NBSEG ? "release seg" : "release xseg");
+		pmap_segtab_free(stp);
+		*stp_p = NULL;
+	}
+}
+#endif
 
 /*
  * Allocate the top segment table for the pmap.
@@ -337,8 +709,49 @@ pmap_segtab_alloc(void)
 void
 pmap_segtab_init(pmap_t pmap)
 {
-
+#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_POOLPAGE)
+	/*
+	 * Constantly converting from extracted PA to VA is somewhat expensive
+	 * for systems with hardware page walkers and without an inexpensive
+	 * way to access arbitrary virtual addresses, so we allocate an extra
+	 * root segtab so that it can contain non-virtual addresses.
+	 */
 	pmap->pm_segtab = pmap_segtab_alloc();
+#endif
+#ifdef PMAP_HWPAGEWALKER
+	pmap->pm_pdetab = pmap_pdetab_alloc();
+	pmap_md_pdetab_init(pmap);
+#endif
+}
+
+void
+pmap_segtab_remove_all(pmap_t pmap)
+{
+	struct pglist *list;
+	struct vm_page *pg;
+
+//	pmap_pv_remove_all(pmap);
+//	KASSERT(TAILQ_EMPTY(&pmap->pm_pvp_list));
+
+#if defined(_LP64)
+#if defined(PMAP_HWPAGEWALKER)
+	list = &pmap->pm_pdetab_list;
+	while ((pg = TAILQ_FIRST(list)) != TAILQ_END(list)) {
+		pmap_pdetab_t * const ptb = (pmap_pdetab_t *) VM_PAGE_TO_MD(pg)->mdpg_first.pv_va;
+		pmap_pdetab_free(ptb);
+	}
+#else
+	list = &pmap->pm_segtab_list;
+	while ((pg = TAILQ_FIRST(list)) != TAILQ_END(list)) {
+		pmap_segtab_t * const stb = (pmap_segtab_t *) VM_PAGE_TO_MD(pg)->mdpg_first.pv_va;
+		pmap_segtab_free(stb);
+	}
+#endif
+#endif /* _LP64 */
+	list = &pmap->pm_ptp_list;
+	while ((pg = TAILQ_FIRST(list)) != TAILQ_END(list)) {
+		pmap_ptpage_free(pmap, (pmap_ptpage_t *) VM_PAGE_TO_MD(pg)->mdpg_first.pv_va);
+	}
 }
 
 /*
@@ -349,6 +762,17 @@ pmap_segtab_init(pmap_t pmap)
 void
 pmap_segtab_destroy(pmap_t pmap, pte_callback_t func, uintptr_t flags)
 {
+	KASSERT(pmap != pmap_kernel());
+#if defined(PMAP_HWPAGEWALKER)
+#if !defined(PMAP_MAP_POOLPAGE)
+	KASSERT((pmap->pm_segtab == NULL) == (pmap->pm_pdetab == NULL));
+#endif
+	KASSERT(pmap->pm_pdetab == NULL);
+#endif
+#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_POOLPAGE)
+//	KASSERT(pmap->pm_segtab == NULL);
+
+	//XXXNH need the rest of this function converting to pmap_segtab_remove_all
 	if (pmap->pm_segtab == NULL)
 		return;
 
@@ -359,6 +783,7 @@ pmap_segtab_destroy(pmap_t pmap, pte_callback_t func, uintptr_t flags)
 #endif
 	pmap_segtab_release(pmap, &pmap->pm_segtab, func == NULL,
 	    func, flags, pmap->pm_minaddr, vinc);
+#endif
 }
 
 /*
@@ -368,17 +793,26 @@ void
 pmap_segtab_activate(struct pmap *pm, struct lwp *l)
 {
 	if (l == curlwp) {
-		struct cpu_info * const ci = l->l_cpu;
 		KASSERT(pm == l->l_proc->p_vmspace->vm_map.pmap);
 		if (pm == pmap_kernel()) {
-			ci->ci_pmap_user_segtab = PMAP_INVALID_SEGTAB_ADDRESS;
+#if defined(PMAP_HWPAGEWALKER)
+			pmap_md_pdetab_activate(pm, l);
+#endif
+#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_POOLPAGE)
+			l->l_cpu->ci_pmap_user_segtab = PMAP_INVALID_SEGTAB_ADDRESS;
 #ifdef _LP64
-			ci->ci_pmap_user_seg0tab = PMAP_INVALID_SEGTAB_ADDRESS;
+			l->l_cpu->ci_pmap_user_seg0tab = PMAP_INVALID_SEGTAB_ADDRESS;
+#endif
 #endif
 		} else {
-			ci->ci_pmap_user_segtab = pm->pm_segtab;
+#if defined(PMAP_HWPAGEWALKER)
+			pmap_md_pdetab_activate(pm, l);
+#endif
+#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_POOLPAGE)
+			l->l_cpu->ci_pmap_user_segtab = pm->pm_segtab;
 #ifdef _LP64
-			ci->ci_pmap_user_seg0tab = pm->pm_segtab->seg_seg[0];
+			l->l_cpu->ci_pmap_user_seg0tab = pm->pm_segtab->seg_seg[0];
+#endif
 #endif
 		}
 	}
@@ -422,6 +856,103 @@ pmap_pte_process(pmap_t pmap, vaddr_t sva, vaddr_t eva,
 	}
 }
 
+
+
+
+
+#if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_POOLPAGE)
+static pd_entry_t *
+pmap_pdetab_reserve(struct pmap *pmap, vaddr_t va)
+#elif defined(PMAP_HWPAGEWALKER)
+static pmap_ptpage_t **
+pmap_segtab_reserve(struct pmap *pmap, vaddr_t va, pd_entry_t **pde_p)
+#else
+static pmap_ptpage_t **
+pmap_segtab_reserve(struct pmap *pmap, vaddr_t va)
+#endif
+{
+#if defined(PMAP_HWPAGEWALKER)
+	pmap_pdetab_t *ptb = pmap->pm_pdetab;
+#endif
+#if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_POOLPAGE)
+	vaddr_t segtab_mask = PMAP_PDETABSIZE - 1;
+#ifdef _LP64
+	for (size_t segshift = XSEGSHIFT;
+	     segshift > SEGSHIFT;
+	     segshift -= PGSHIFT - 3, segtab_mask = NSEGPG - 1) {
+		pd_entry_t * const pde_p =
+		    &ptb->pde_pde[(va >> segshift) & segtab_mask];
+		pd_entry_t opde = *pde_p;
+		if (__predict_false(!pte_pde_valid_p(opde))) {
+			ptb = pmap_pdetab_alloc();
+			pd_entry_t npde = pte_pde_pdetab(
+			    pmap_md_direct_mapped_vaddr_to_paddr((vaddr_t)ptb),
+			    pmap == pmap_kernel());
+			opde = pte_pde_cas(pde_p, opde, npde);
+			if (__predict_false(pte_pde_valid_p(opde))) {
+				pmap_pdetab_free(ptb);
+			} else {
+				opde = npde;
+			}
+		}
+		ptb = pmap_pde_to_pdetab(opde);
+	}
+#elif defined(XSEGSHIFT)
+	pd_entry_t opde = ptb->pde_pde[(va >> segshift) & segtab_mask];
+	KASSERT(pte_pde_valid_p(opde));
+	ptb = pmap_pde_to_pdetab(opde);
+	segtab_mask = NSEGPG - 1;
+#endif /* _LP64 */
+	const size_t idx = (va >> SEGSHIFT) & segtab_mask;
+	return &ptb->pde_pde[idx];
+#else
+	pmap_segtab_t *stb = pmap->pm_segtab;
+	vaddr_t segtab_mask = PMAP_SEGTABSIZE - 1;
+#ifdef _LP64
+	for (size_t segshift = XSEGSHIFT;
+	     segshift > SEGSHIFT;
+	     segshift -= PGSHIFT - 3, segtab_mask = NSEGPG - 1) {
+		size_t idx = (va >> segshift) & segtab_mask;
+		pmap_segtab_t ** const stb_p = &stb->seg_seg[idx];
+#if defined(PMAP_HWPAGEWALKER)
+		pmap_pdetab_t ** const ptb_p = &ptb->pde_pde[idx];
+#endif
+		if (__predict_false((stb = *stb_p) == NULL)) {
+			stb = pmap_segtab_alloc();
+#ifdef MULTIPROCESSOR
+			pmap_segtab_t *ostb = atomic_cas_ptr(stb_p, NULL, stb);
+			if (__predict_false(ostb != NULL)) {
+				pmap_segtab_free(stb);
+				stb = ostb;
+			}
+#else
+			*stb_p = stb;
+#endif
+		}
+	}
+#else
+	pmap_segtab_t opde = ptb->pde_pde[(va >> segshift) & segtab_mask];
+	KASSERT(pte_pde_valid_p(opde));
+	ptb = pmap_pde_to_pdetab(opde);
+	segtab_mask = NSEGPG - 1;
+
+#endif /* _LP64 */
+	size_t idx = (va >> SEGSHIFT) & segtab_mask;
+#if defined(PMAP_HWPAGEWALKER)
+#if defined(XSEGSHIFT) && XSEGSHIFT != SEGSHIFT
+	*pte_p = &pmap->pm_segtab;
+#else
+	*pde_p = &ptb->pde_pde[idx];
+#endif
+#endif
+	return &stb->seg_tab[idx];
+#endif
+}
+
+
+
+
 
 /*
  * Return a pointer for the pte that corresponds to the specified virtual
 * address (va) in the target physical map, allocating if needed.
@@ -429,84 +960,58 @@ pmap_pte_process(pmap_t pmap, vaddr_t sva, vaddr_t eva,
 pt_entry_t *
 pmap_pte_reserve(pmap_t pmap, vaddr_t va, int flags)
 {
-	pmap_segtab_t *stp = pmap->pm_segtab;
-	pt_entry_t *pte;
-
-	pte = pmap_pte_lookup(pmap, va);
-	if (__predict_false(pte == NULL)) {
-#ifdef _LP64
-		pmap_segtab_t ** const stp_p =
-		    &stp->seg_seg[(va >> XSEGSHIFT) & (NSEGPG - 1)];
-		if (__predict_false((stp = *stp_p) == NULL)) {
-			pmap_segtab_t *nstp = pmap_segtab_alloc();
-#ifdef MULTIPROCESSOR
-			pmap_segtab_t *ostp = atomic_cas_ptr(stp_p, NULL, nstp);
-			if (__predict_false(ostp != NULL)) {
-				pmap_check_stp(nstp, __func__, "reserve");
-				pmap_segtab_free(nstp);
-				nstp = ostp;
-			}
+//	const size_t pte_idx = (va >> PGSHIFT) & (NPTEPG - 1);
+	pmap_ptpage_t *ptp;
+	paddr_t pa = 0;
+
+#if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_POOLPAGE)
+	pd_entry_t * const pde_p = pmap_pdetab_reserve(pmap, va);
+	ptp = pmap_pde_to_ptpage(*pde_p);
+#elif defined(PMAP_HWPAGEWALKER)
+	pmap_ptpage_t ** const ptp_p = pmap_segtab_reserve(pmap, va, &pde_p);
+	ptp = *ptp_p;
 #else
-			*stp_p = nstp;
-#endif /* MULTIPROCESSOR */
-			stp = nstp;
-		}
-		KASSERT(stp == pmap->pm_segtab->seg_seg[(va >> XSEGSHIFT) & (NSEGPG - 1)]);
-#endif /* _LP64 */
-		struct vm_page *pg = NULL;
-#ifdef PMAP_PTP_CACHE
-		mutex_spin_enter(&pmap_segtab_lock);
-		if ((pg = LIST_FIRST(&pmap_segtab_info.ptp_pgflist)) != NULL) {
-			LIST_REMOVE(pg, listq.list);
-			KASSERT(LIST_FIRST(&pmap_segtab_info.ptp_pgflist) != pg);
-		}
-		mutex_spin_exit(&pmap_segtab_lock);
+	pmap_ptpage_t ** const ptp_p = pmap_segtab_reserve(pmap, va);
+	ptp = *ptp_p;
 #endif
-		if (pg == NULL)
-			pg = pmap_pte_pagealloc();
-		if (pg == NULL) {
-			if (flags & PMAP_CANFAIL)
-				return NULL;
-			panic("%s: cannot allocate page table page "
-			    "for va %" PRIxVADDR, __func__, va);
-		}
-		const paddr_t pa = VM_PAGE_TO_PHYS(pg);
-		pte = (pt_entry_t *)PMAP_MAP_POOLPAGE(pa);
-		pt_entry_t ** const pte_p =
-		    &stp->seg_tab[(va >> SEGSHIFT) & (PMAP_SEGTABSIZE - 1)];
+	if (__predict_false(ptp == NULL)) {
+		ptp = pmap_ptpage_alloc(pmap, flags, &pa);
+		if (__predict_false(ptp == NULL))
+			return NULL;
+
+#if defined(PMAP_HWPAGEWALKER)
+		pd_entry_t npde = pte_pde_ptpage(pa, pmap == pmap_kernel());
+#endif
+#if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_POOLPAGE)
+		pd_entry_t opde = *pde_p;
+		opde = pte_pde_cas(pde_p, opde, npde);
+		if (__predict_false(pte_pde_valid_p(opde))) {
+			pmap_ptpage_free(pmap, ptp);
+			ptp = pmap_pde_to_ptpage(opde);
+		}
+#else
 #ifdef MULTIPROCESSOR
-		pt_entry_t *opte = atomic_cas_ptr(pte_p, NULL, pte);
+		pmap_ptpage_t *optp = atomic_cas_ptr(ptp_p, NULL, ptp);
 
 		/*
 		 * If another thread allocated the segtab needed for this va
 		 * free the page we just allocated.
 		 */
-		if (__predict_false(opte != NULL)) {
-#ifdef PMAP_PTP_CACHE
-			mutex_spin_enter(&pmap_segtab_lock);
-			LIST_INSERT_HEAD(&pmap_segtab_info.ptp_pgflist,
-			    pg, listq.list);
-			mutex_spin_exit(&pmap_segtab_lock);
-#else
-			PMAP_UNMAP_POOLPAGE((vaddr_t)pte);
-			uvm_pagefree(pg);
+		if (__predict_false(optp != NULL)) {
+			pmap_ptpage_free(pmap, ptp);
+			ptp = optp;
+#if defined(PMAP_HWPAGEWALKER)
+		} else {
+			pte_pde_set(pde_p, npde);
 #endif
-			pte = opte;
 		}
-#else
-		*pte_p = pte;
-#endif
-		KASSERT(pte == stp->seg_tab[(va >> SEGSHIFT) & (PMAP_SEGTABSIZE - 1)]);
-
-#ifdef DEBUG
-		for (size_t i = 0; i < NPTEPG; i++) {
-			if (!pte_zero_p(pte[i]))
-				panic("%s: new segmap %p not empty @ %zu",
-				    __func__, pte, i);
-		}
-#endif
-		pte += (va >> PGSHIFT) & (NPTEPG - 1);
+#else /* !MULTIPROCESSOR */
+		*ptp_p = ptp;
+#endif /* MULTIPROCESSOR */
+#endif /* PMAP_HWPAGEWALKER && PMAP_MAP_POOLPAGE */
 	}
-	return pte;
+
+	const size_t pte_idx = pte_index(va);
+
+	return ptp->ptp_ptes + pte_idx;
 }
diff --git a/sys/uvm/pmap/pmap_tlb.c b/sys/uvm/pmap/pmap_tlb.c
index eb85072..4844a0e 100644
--- a/sys/uvm/pmap/pmap_tlb.c
+++ b/sys/uvm/pmap/pmap_tlb.c
@@ -724,7 +724,6 @@ pmap_tlb_shootdown_bystanders(pmap_t pm)
 }
 #endif /* MULTIPROCESSOR && PMAP_TLB_NEED_SHOOTDOWN */
 
-#ifndef PMAP_HWPAGEWALKER
 int
 pmap_tlb_update_addr(pmap_t pm, vaddr_t va, pt_entry_t pte, u_int flags)
 {
@@ -734,7 +733,7 @@ pmap_tlb_update_addr(pmap_t pm, vaddr_t va, pt_entry_t pte, u_int flags)
 
 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 	UVMHIST_LOG(maphist,
-	    " (pm=%#jx va=%#j, pte=%#jx flags=%#jx)",
+	    " (pm=%#jx va=%#jx, pte=%#jx flags=%#jx)",
 	    (uintptr_t)pm, va, pte_value(pte), flags);
 	KASSERT(kpreempt_disabled());
 
@@ -765,7 +764,6 @@ pmap_tlb_update_addr(pmap_t pm, vaddr_t va, pt_entry_t pte, u_int flags)
 
 	return rv;
 }
-#endif /* !PMAP_HWPAGEWALKER */
 
 void
 pmap_tlb_invalidate_addr(pmap_t pm, vaddr_t va)
diff --git a/sys/uvm/pmap/pmap_tlb.h b/sys/uvm/pmap/pmap_tlb.h
index e3221c8..be28cd4 100644
--- a/sys/uvm/pmap/pmap_tlb.h
+++ b/sys/uvm/pmap/pmap_tlb.h
@@ -143,7 +143,7 @@ struct pmap_tlb_info {
 #endif
 	struct evcnt ti_evcnt_asid_reinits;
 #ifndef PMAP_TLB_BITMAP_LENGTH
-#define PMAP_TLB_BITMAP_LENGTH		256
+#define PMAP_TLB_BITMAP_LENGTH		4096
 #endif
 	__BITMAP_TYPE(, u_long, PMAP_TLB_BITMAP_LENGTH) ti_asid_bitmap;
 };
diff --git a/sys/uvm/pmap/vmpagemd.h b/sys/uvm/pmap/vmpagemd.h
index bef315f..ae49f96 100644
--- a/sys/uvm/pmap/vmpagemd.h
+++ b/sys/uvm/pmap/vmpagemd.h
@@ -63,6 +63,8 @@ typedef struct pv_entry {
 #define	PV_KENTER	__BIT(0)
 } *pv_entry_t;
 
+#define	PV_ISKENTRY_P(pv)	(((pv)->pv_va & PV_KENTER) != 0)
+
 #ifndef _MODULE
 
 #define	VM_PAGEMD_REFERENCED	__BIT(0)	/* page has been referenced */
@@ -73,14 +75,14 @@ typedef struct pv_entry {
 #define	VM_PAGEMD_UNCACHED	__BIT(4)	/* page is mapped uncached */
 #endif
 
+#define	VM_PAGEMD_REFERENCED_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_REFERENCED) != 0)
+#define	VM_PAGEMD_MODIFIED_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_MODIFIED) != 0)
+#define	VM_PAGEMD_POOLPAGE_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_POOLPAGE) != 0)
+#define	VM_PAGEMD_EXECPAGE_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_EXECPAGE) != 0)
 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
 #define	VM_PAGEMD_CACHED_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_UNCACHED) == 0)
 #define	VM_PAGEMD_UNCACHED_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_UNCACHED) != 0)
 #endif
 
-#define	VM_PAGEMD_MODIFIED_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_MODIFIED) != 0)
-#define	VM_PAGEMD_REFERENCED_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_REFERENCED) != 0)
-#define	VM_PAGEMD_POOLPAGE_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_POOLPAGE) != 0)
-#define	VM_PAGEMD_EXECPAGE_P(mdpg)	(((mdpg)->mdpg_attrs & VM_PAGEMD_EXECPAGE) != 0)
 
 #endif /* !_MODULE */
 
@@ -100,16 +102,13 @@ struct vm_page_md {
 #define	VM_PAGEMD_PVLIST_LOCK_INIT(mdpg)	do { } while (/*CONSTCOND*/ 0)
 #endif /* MULTIPROCESSOR || MODULAR */
 
-#define	VM_PAGEMD_PVLIST_LOCK(mdpg)	\
-	pmap_pvlist_lock(mdpg, 1)
-#define	VM_PAGEMD_PVLIST_READLOCK(mdpg)	\
-	pmap_pvlist_lock(mdpg, 0)
-#define	VM_PAGEMD_PVLIST_UNLOCK(mdpg)	\
-	pmap_pvlist_unlock(mdpg)
-#define	VM_PAGEMD_PVLIST_LOCKED_P(mdpg)	\
-	pmap_pvlist_locked_p(mdpg)
-#define	VM_PAGEMD_PVLIST_GEN(mdpg)	\
-	((mdpg)->mdpg_attrs >> 16)
+#define	VM_PAGEMD_PVLIST_LOCK(mdpg)	pmap_pvlist_lock(mdpg, 1)
+#define	VM_PAGEMD_PVLIST_READLOCK(mdpg)	pmap_pvlist_lock(mdpg, 0)
+#define	VM_PAGEMD_PVLIST_UNLOCK(mdpg)	pmap_pvlist_unlock(mdpg)
+#define	VM_PAGEMD_PVLIST_LOCKED_P(mdpg)	pmap_pvlist_locked_p(mdpg)
+#define	VM_PAGEMD_PVLIST_GEN(mdpg)	((mdpg)->mdpg_attrs >> 16)
+
+#define	VM_PAGEMD_PVLIST_EMPTY_P(mdpg)	((mdpg)->mdpg_first.pv_pmap == NULL)
 
 #ifdef _KERNEL
 #if defined(MULTIPROCESSOR) || defined(MODULAR)
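
Review note, not part of the patch: the central change above swaps the old per-segment pt_entry_t pointers for page-sized pmap_ptpage_t leaves, so pmap_pte_lookup() now goes through pmap_ptpage() and pte_index() instead of doing pointer arithmetic on seg_tab[]. The stand-alone userland sketch below only models the shape of that lookup; the constants (4 KiB pages, 32-bit PTEs, a single-level segtab covering a 32-bit address space) and the segtab_pte_lookup() helper are illustrative assumptions, not values or interfaces taken from any port.

/* ptpage_lookup_sketch.c - simplified model of the reworked PTE lookup. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PGSHIFT		12			/* assumed 4 KiB pages */
#define NPTEPG		(1 << (PGSHIFT - 2))	/* 1024 32-bit PTEs fill one page */
#define SEGSHIFT	(PGSHIFT + 10)		/* one ptpage maps NPTEPG * PAGE_SIZE */
#define PMAP_SEGTABSIZE	(1 << 10)		/* one level covers a 32-bit VA space */

typedef uint32_t pt_entry_t;

typedef struct {				/* mirrors pmap_ptpage_t */
	pt_entry_t ptp_ptes[NPTEPG];
} pmap_ptpage_t;

typedef struct {				/* simplified pmap_segtab_t */
	pmap_ptpage_t *seg_tab[PMAP_SEGTABSIZE];
} pmap_segtab_t;

static size_t
pte_index(uintptr_t va)
{
	return (va >> PGSHIFT) & (NPTEPG - 1);
}

/* Shape of pmap_pte_lookup(): NULL when no leaf page exists yet. */
static pt_entry_t *
segtab_pte_lookup(pmap_segtab_t *stb, uintptr_t va)
{
	pmap_ptpage_t *ptp = stb->seg_tab[(va >> SEGSHIFT) & (PMAP_SEGTABSIZE - 1)];

	if (ptp == NULL)
		return NULL;
	return ptp->ptp_ptes + pte_index(va);
}

int
main(void)
{
	static pmap_segtab_t stb;
	uintptr_t va = 0x00401000;
	size_t segidx = (va >> SEGSHIFT) & (PMAP_SEGTABSIZE - 1);

	/* Populate the leaf on demand, roughly what pmap_pte_reserve() does. */
	stb.seg_tab[segidx] = calloc(1, sizeof(pmap_ptpage_t));
	if (stb.seg_tab[segidx] == NULL)
		return 1;

	pt_entry_t *ptep = segtab_pte_lookup(&stb, va);
	printf("va %#lx -> segtab index %zu, pte index %zu, ptep %p\n",
	    (unsigned long)va, segidx, pte_index(va), (void *)ptep);
	free(stb.seg_tab[segidx]);
	return 0;
}

Built with cc -Wall, this prints the segtab and PTE indices for the example VA, which is the same index split pmap_pte_reserve() performs before returning &ptp->ptp_ptes[pte_index(va)].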