Index: sys/arch/arm/arm/cpufunc.c
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/arm/cpufunc.c,v
retrieving revision 1.175
diff -u -p -r1.175 cpufunc.c
--- sys/arch/arm/arm/cpufunc.c	20 Oct 2018 06:35:34 -0000	1.175
+++ sys/arch/arm/arm/cpufunc.c	26 Feb 2020 11:59:34 -0000
@@ -1776,7 +1776,7 @@ set_cpufuncs(void)
 #ifdef ARM11_CACHE_WRITE_THROUGH
 		pmap_pte_init_arm11();
 #else
-		pmap_pte_init_generic();
+		pmap_pte_init_armv6();
 #endif
 		if (arm_cache_prefer_mask)
 			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
Index: sys/arch/arm/arm32/arm32_boot.c
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/arm32/arm32_boot.c,v
retrieving revision 1.33
diff -u -p -r1.33 arm32_boot.c
--- sys/arch/arm/arm32/arm32_boot.c	16 Mar 2019 10:05:40 -0000	1.33
+++ sys/arch/arm/arm32/arm32_boot.c	26 Feb 2020 11:59:35 -0000
@@ -214,6 +214,9 @@ initarm_common(vaddr_t kvm_base, vsize_t
 	 * of the stack memory.
 	 */
 	VPRINTF("init subsystems: stacks ");
+	printf("\n");
+	printf("fiq %lx irq %lx abt %lx und %lx\n", fiqstack.pv_va + FIQ_STACK_SIZE * PAGE_SIZE, irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE, abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE, undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);
+	printf("\n");
 	set_stackptr(PSR_FIQ32_MODE,
 	    fiqstack.pv_va + FIQ_STACK_SIZE * PAGE_SIZE);
 	set_stackptr(PSR_IRQ32_MODE,
@@ -381,6 +384,15 @@ cpu_hatch(struct cpu_info *ci, u_int cpu
 	 * SVC32 stack of the idlelwp).
 	 */
 	VPRINTF(" stacks");
+	printf("\n");
+	printf("fiq %lx irq %lx abt %lx und %lx\n",
+	    fiqstack.pv_va + (cpu_index(ci) + 1) * FIQ_STACK_SIZE * PAGE_SIZE,
+	    irqstack.pv_va + (cpu_index(ci) + 1) * IRQ_STACK_SIZE * PAGE_SIZE,
+	    abtstack.pv_va + (cpu_index(ci) + 1) * ABT_STACK_SIZE * PAGE_SIZE,
+	    undstack.pv_va + (cpu_index(ci) + 1) * UND_STACK_SIZE * PAGE_SIZE
+	    );
+	printf("\n");
+
 	set_stackptr(PSR_FIQ32_MODE,
 	    fiqstack.pv_va + (cpu_index(ci) + 1) * FIQ_STACK_SIZE * PAGE_SIZE);
 	set_stackptr(PSR_IRQ32_MODE,
Index: sys/arch/arm/arm32/arm32_kvminit.c
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/arm32/arm32_kvminit.c,v
retrieving revision 1.55
diff -u -p -r1.55 arm32_kvminit.c
--- sys/arch/arm/arm32/arm32_kvminit.c	8 May 2019 13:18:47 -0000	1.55
+++ sys/arch/arm/arm32/arm32_kvminit.c	26 Feb 2020 11:59:35 -0000
@@ -347,9 +347,12 @@ add_pages(struct bootmem_info *bmi, pv_a
 
 static void
 valloc_pages(struct bootmem_info *bmi, pv_addr_t *pv, size_t npages,
-    int prot, int cache, bool zero_p)
+    int prot, int cache, bool zero_p, bool guard)
 {
 	size_t nbytes = npages * PAGE_SIZE;
+	if (guard)
+		nbytes += PAGE_SIZE;
+
 	pv_addr_t *free_pv = bmi->bmi_freeblocks;
 	size_t free_idx = 0;
 	static bool l1pt_found;
@@ -367,7 +370,7 @@ valloc_pages(struct bootmem_info *bmi, p
 
 		VPRINTF(" l1pt");
 		valloc_pages(bmi, &kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE,
-		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true, true);
 		add_pages(bmi, &kernel_l1pt);
 	}
 
@@ -392,6 +395,8 @@ valloc_pages(struct bootmem_info *bmi, p
 	pv->pv_pa = free_pv->pv_pa;
 	pv->pv_va = free_pv->pv_va;
 	pv->pv_size = nbytes;
+	if (guard)
+		pv->pv_size -= PAGE_SIZE;
 	pv->pv_prot = prot;
 	pv->pv_cache = cache;
 
@@ -535,7 +540,7 @@ arm32_kernel_vm_init(vaddr_t kernel_vm_b
 	 */
 	VPRINTF(" vector");
 	valloc_pages(bmi, &bmi->bmi_vector_l2pt, 1,
-	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true, false);
 	add_pages(bmi, &bmi->bmi_vector_l2pt);
 	}
 
@@ -545,7 +550,7 @@ arm32_kernel_vm_init(vaddr_t kernel_vm_b
 	VPRINTF(" kernel");
 	for (size_t idx = 0; idx < KERNEL_L2PT_KERNEL_NUM; ++idx) {
 		valloc_pages(bmi, &kernel_l2pt[idx], 1,
-		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true, false);
 		add_pages(bmi, &kernel_l2pt[idx]);
 	}
 
@@ -555,7 +560,7 @@ arm32_kernel_vm_init(vaddr_t kernel_vm_b
 	VPRINTF(" vm");
 	for (size_t idx = 0; idx < KERNEL_L2PT_VMDATA_NUM; ++idx) {
 		valloc_pages(bmi, &vmdata_l2pt[idx], 1,
-		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true, true);
 		add_pages(bmi, &vmdata_l2pt[idx]);
 	}
 
@@ -565,7 +570,7 @@ arm32_kernel_vm_init(vaddr_t kernel_vm_b
 	if (iovbase) {
 		VPRINTF(" io");
 		valloc_pages(bmi, &bmi->bmi_io_l2pt, 1,
-		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
+		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true, false);
 		add_pages(bmi, &bmi->bmi_io_l2pt);
 	}
 
@@ -573,28 +578,28 @@ arm32_kernel_vm_init(vaddr_t kernel_vm_b
 
 	/* Allocate stacks for all modes and CPUs */
 	valloc_pages(bmi, &abtstack, ABT_STACK_SIZE * cpu_num,
-	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true, true);
 	add_pages(bmi, &abtstack);
 	valloc_pages(bmi, &fiqstack, FIQ_STACK_SIZE * cpu_num,
-	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true, true);
 	add_pages(bmi, &fiqstack);
 	valloc_pages(bmi, &irqstack, IRQ_STACK_SIZE * cpu_num,
-	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true, true);
 	add_pages(bmi, &irqstack);
 	valloc_pages(bmi, &undstack, UND_STACK_SIZE * cpu_num,
-	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true, true);
 	add_pages(bmi, &undstack);
 	valloc_pages(bmi, &idlestack, UPAGES * cpu_num,		/* SVC32 */
-	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true, true);
 	add_pages(bmi, &idlestack);
 	valloc_pages(bmi, &kernelstack, UPAGES,			/* SVC32 */
-	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true, true);
 	add_pages(bmi, &kernelstack);
 
 	/* Allocate the message buffer from the end of memory. */
 	const size_t msgbuf_pgs = round_page(MSGBUFSIZE) / PAGE_SIZE;
 	valloc_pages(bmi, &msgbuf, msgbuf_pgs,
-	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, false);
+	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, false, false);
 	add_pages(bmi, &msgbuf);
 	msgbufphys = msgbuf.pv_pa;
 	msgbufaddr = (void *)msgbuf.pv_va;
@@ -608,7 +613,7 @@ arm32_kernel_vm_init(vaddr_t kernel_vm_b
 		VPRINTF(" vector");
 		valloc_pages(bmi, &systempage, 1,
 		    VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE,
-		    PTE_CACHE, true);
+		    PTE_CACHE, true, false);
 	}
 	systempage.pv_va = vectors;
 
@@ -621,7 +626,7 @@ arm32_kernel_vm_init(vaddr_t kernel_vm_b
 	if (xscale_use_minidata)
 #endif
 		valloc_pages(bmi, &minidataclean, 1,
-		    VM_PROT_READ|VM_PROT_WRITE, 0, true);
+		    VM_PROT_READ|VM_PROT_WRITE, 0, true, false);
 #endif
 
 	/*
@@ -1021,3 +1026,4 @@ arm32_kernel_vm_init(vaddr_t kernel_vm_b
 
 	VPRINTF(" OK\n");
 }
+
Index: sys/arch/arm/arm32/arm32_machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/arm32/arm32_machdep.c,v
retrieving revision 1.128.2.1
diff -u -p -r1.128.2.1 arm32_machdep.c
--- sys/arch/arm/arm32/arm32_machdep.c	12 Feb 2020 20:10:09 -0000	1.128.2.1
+++ sys/arch/arm/arm32/arm32_machdep.c	26 Feb 2020 11:59:35 -0000
@@ -600,6 +600,8 @@ parse_mi_bootargs(char *args)
 	    || get_bootconf_option(args, "-x", BOOTOPT_TYPE_BOOLEAN, &integer))
 		if (integer)
 			boothowto |= AB_DEBUG;
+boothowto |= AB_VERBOSE | AB_DEBUG;
+
 }
 
 #ifdef __HAVE_FAST_SOFTINTS
Index: sys/arch/arm/arm32/armv7_generic_space.c
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/arm32/armv7_generic_space.c,v
retrieving revision 1.10
diff -u -p -r1.10 armv7_generic_space.c
--- sys/arch/arm/arm32/armv7_generic_space.c	19 Nov 2018 10:45:47 -0000	1.10
+++ sys/arch/arm/arm32/armv7_generic_space.c	26 Feb 2020 11:59:35 -0000
@@ -319,7 +319,7 @@ armv7_generic_bs_map(void *t, bus_addr_t
 	else if (flag & BUS_SPACE_MAP_CACHEABLE)
 		pmapflags = 0;
 	else
-		pmapflags = PMAP_NOCACHE;
+		pmapflags = PMAP_DEV;
 
 	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
 		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, pmapflags);
Index: sys/arch/arm/arm32/bus_dma.c
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/arm32/bus_dma.c,v
retrieving revision 1.115.2.2
diff -u -p -r1.115.2.2 bus_dma.c
--- sys/arch/arm/arm32/bus_dma.c	6 Nov 2019 09:43:19 -0000	1.115.2.2
+++ sys/arch/arm/arm32/bus_dma.c	26 Feb 2020 11:59:35 -0000
@@ -315,6 +315,7 @@ _bus_dmamap_create(bus_dma_tag_t t, bus_
 {
 	struct arm32_bus_dmamap *map;
 	void *mapstore;
+	int error = 0;
 
 #ifdef DEBUG_DMA
 	printf("dmamap_create: t=%p size=%lx nseg=%x msegsz=%lx boundary=%lx"
@@ -357,7 +358,6 @@ _bus_dmamap_create(bus_dma_tag_t t, bus_
 		struct arm32_bus_dma_cookie *cookie;
 		int cookieflags;
 		void *cookiestore;
-		int error;
 
 		cookieflags = 0;
 
@@ -404,7 +404,7 @@ _bus_dmamap_create(bus_dma_tag_t t, bus_
 #ifdef DEBUG_DMA
 	printf("dmamap_create:map=%p\n", map);
 #endif	/* DEBUG_DMA */
-	return 0;
+	return error;
 }
 
 /*
@@ -1450,6 +1450,7 @@ _bus_dmamem_map(bus_dma_tag_t t, bus_dma
 		else if (uncached)
 			pmap_flags |= PMAP_NOCACHE;
 
+printf("%s: mapping va %" PRIxVADDR " with flags %x pre %d c=%d/%d dr %p\n", __func__, va, pmap_flags, !!prefetchable, uncached, !!((flags & BUS_DMA_COHERENT)), dr);
 		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE,
 		    pmap_flags);
 	}
Index: sys/arch/arm/arm32/cpu.c
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/arm32/cpu.c,v
retrieving revision 1.129.4.2
diff -u -p -r1.129.4.2 cpu.c
--- sys/arch/arm/arm32/cpu.c	9 Jan 2020 17:16:47 -0000	1.129.4.2
+++ sys/arch/arm/arm32/cpu.c	26 Feb 2020 11:59:35 -0000
@@ -888,3 +888,16 @@ identify_features(device_t dv)
 	    "pfr: [0]=%#x [1]=%#x\n",
 	    cpu_processor_features[0], cpu_processor_features[1]);
 }
+
+#ifdef _ARM_ARCH_6
+int
+cpu_maxproc_hook(int nmaxproc)
+{
+
+#ifdef ARM_MMU_EXTENDED
+	return pmap_maxproc_set(nmaxproc);
+#else
+	return 0;
+#endif
+}
+#endif
Index: sys/arch/arm/arm32/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/arm32/pmap.c,v
retrieving revision 1.373.2.1
diff -u -p -r1.373.2.1 pmap.c
--- sys/arch/arm/arm32/pmap.c	10 Feb 2020 19:20:01 -0000	1.373.2.1
+++ sys/arch/arm/arm32/pmap.c	26 Feb 2020 11:59:37 -0000
@@ -778,6 +778,19 @@ static void pmap_init_l1(struct l1_ttab
 #endif
 static vaddr_t		kernel_pt_lookup(paddr_t);
 
+#ifdef ARM_MMU_EXTENDED
+static struct pool_cache pmap_l1tt_cache;
+
+static int	pmap_l1tt_ctor(void *, void *, int);
+static void *	pmap_l1tt_alloc(struct pool *, int);
+static void	pmap_l1tt_free(struct pool *, void *);
+
+static struct pool_allocator pmap_l1tt_allocator = {
+	.pa_alloc = pmap_l1tt_alloc,
+	.pa_free = pmap_l1tt_free,
+	.pa_pagesz = L1TT_SIZE,
+};
+#endif
 
 /*
  * Misc variables
@@ -1290,6 +1303,29 @@ pmap_modify_pv(struct vm_page_md *md, pa
 	return oflags;
 }
 
+
+#if defined(ARM_MMU_EXTENDED)
+int
+pmap_maxproc_set(int nmaxproc)
+{
+	static const char pmap_l1ttpool_warnmsg[] =
+	    "WARNING: l1ttpool limit reached; increase kern.maxproc";
+
+//	pool_cache_setlowat(&pmap_l1tt_cache, nmaxproc);
+
+	/*
+	 * Set the hard limit on the pmap_l1tt_cache to the number
+	 * of processes the kernel is to support.  Log the limit
+	 * reached message max once a minute.
+	 */
+	pool_cache_sethardlimit(&pmap_l1tt_cache, nmaxproc,
+	    pmap_l1ttpool_warnmsg, 60);
+
+	return 0;
+}
+
+#endif
+
 /*
  * Allocate an L1 translation table for the specified pmap.
  * This is called at pmap creation time.
@@ -1298,33 +1334,11 @@ static void
 pmap_alloc_l1(pmap_t pm)
 {
 #ifdef ARM_MMU_EXTENDED
-#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
-	struct vm_page *pg;
-	bool ok __diagused;
-	for (;;) {
-#ifdef PMAP_NEED_ALLOC_POOLPAGE
-		pg = arm_pmap_alloc_poolpage(UVM_PGA_ZERO);
-#else
-		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
-#endif
-		if (pg != NULL)
-			break;
-		uvm_wait("pmapl1alloc");
-	}
-	pm->pm_l1_pa = VM_PAGE_TO_PHYS(pg);
-	vaddr_t va = pmap_direct_mapped_phys(pm->pm_l1_pa, &ok, 0);
-	KASSERT(ok);
-	KASSERT(va >= KERNEL_BASE);
+	vaddr_t va = (vaddr_t)pool_cache_get_paddr(&pmap_l1tt_cache, PR_WAITOK,
+	    &pm->pm_l1_pa);
 
-#else
-	KASSERTMSG(kernel_map != NULL, "pm %p", pm);
-	vaddr_t va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
-	    UVM_KMF_WIRED|UVM_KMF_ZERO);
-	KASSERT(va);
-	pmap_extract(pmap_kernel(), va, &pm->pm_l1_pa);
-#endif
 	pm->pm_l1 = (pd_entry_t *)va;
-	PTE_SYNC_RANGE(pm->pm_l1, PAGE_SIZE / sizeof(pt_entry_t));
+	PTE_SYNC_RANGE(pm->pm_l1, L1TT_SIZE / sizeof(pt_entry_t));
 #else
 	struct l1_ttable *l1;
 	uint8_t domain;
@@ -1369,12 +1383,8 @@ static void
 pmap_free_l1(pmap_t pm)
 {
 #ifdef ARM_MMU_EXTENDED
-#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
-	struct vm_page *pg = PHYS_TO_VM_PAGE(pm->pm_l1_pa);
-	uvm_pagefree(pg);
-#else
-	uvm_km_free(kernel_map, (vaddr_t)pm->pm_l1, PAGE_SIZE, UVM_KMF_WIRED);
-#endif
+	pool_cache_put_paddr(&pmap_l1tt_cache, (void *)pm->pm_l1, pm->pm_l1_pa);
+
 	pm->pm_l1 = NULL;
 	pm->pm_l1_pa = 0;
 #else
@@ -1671,6 +1681,24 @@ pmap_free_l2_bucket(pmap_t pm, struct l2
 		pmap_free_l2_dtable(l2);
 }
 
+#if defined(ARM_MMU_EXTENDED)
+/*
+ * Pool cache constructors for L1 translation tables
+ */
+
+static int
+pmap_l1tt_ctor(void *arg, void *v, int flags)
+{
+#ifndef PMAP_INCLUDE_PTE_SYNC
+#error not supported
+#endif
+
+	memset(v, 0, L1TT_SIZE);
+	PTE_SYNC_RANGE(v, L1TT_SIZE / sizeof(pt_entry_t));
+	return 0;
+}
+#endif
+
 /*
  * Pool cache constructors for L2 descriptor tables, metadata and pmap
  * structures.
@@ -3740,8 +3768,11 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
 		if (!(flags & PMAP_NOCACHE))
 			npte |= pte_l2_s_cache_mode_pt;
 	} else {
-		switch (flags & PMAP_CACHE_MASK) {
+		switch (flags & (PMAP_CACHE_MASK | PMAP_DEV_MASK)) {
+		case PMAP_DEV ... PMAP_DEV | PMAP_CACHE_MASK:
+			break;
 		case PMAP_NOCACHE:
+			npte |= pte_l2_s_nocache_mode;
 			break;
 		case PMAP_WRITE_COMBINE:
 			npte |= pte_l2_s_wc_mode;
@@ -5803,7 +5834,7 @@ pmap_grow_map(vaddr_t va, paddr_t *pap)
 		struct vm_page_md *md __diagused = VM_PAGE_TO_MD(pg);
 		KASSERT(SLIST_EMPTY(&md->pvh_list));
 	}
-
+printf("%s: va %" PRIxVADDR " pa %" PRIxPADDR "\n", __func__, va, pa);
 	pmap_kenter_pa(va, pa,
 	    VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE|PMAP_PTE);
 
@@ -5865,7 +5896,7 @@ pmap_grow_l2_bucket(pmap_t pm, vaddr_t v
 		}
 
 		pmap_kernel_l2dtable_kva = nva;
-
+printf("%s: pmap_kernel_l2dtable_kva %" PRIxVADDR "\n", __func__, pmap_kernel_l2dtable_kva);
 		/*
 		 * Link it into the parent pmap
 		 */
@@ -5902,6 +5933,7 @@ pmap_grow_l2_bucket(pmap_t pm, vaddr_t v
 
 		pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL;
 		pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL;
+printf("%s:pmap_kernel_l2ptp_kva %" PRIxVADDR "\n", __func__, pmap_kernel_l2ptp_kva);
 	}
 
 	return l2b;
 }
@@ -5919,6 +5951,7 @@ pmap_growkernel(vaddr_t maxkvaddr)
 	if (maxkvaddr <= pmap_curmaxkvaddr)
 		goto out;		/* we are OK */
+printf("%s: maxkvaddr %" PRIxVADDR " pmap_curmaxkvaddr %" PRIxVADDR "\n", __func__, maxkvaddr, pmap_curmaxkvaddr);
 
 	NPDEBUG(PDB_GROWKERN,
 	    printf("pmap_growkernel: growing kernel from 0x%lx to 0x%lx\n",
 	    pmap_curmaxkvaddr, maxkvaddr));
@@ -6285,6 +6318,8 @@ pmap_bootstrap(vaddr_t vstart, vaddr_t v
 	virtual_end = vend;
 
 	VPRINTF("specials ");
+
+// XXXNH why PAGE_SIZE / L2_S_SIZE???
 #ifdef PMAP_CACHE_VIPT
 	/*
 	 * If we have a VIPT cache, we need one page/pte per possible alias
@@ -6309,6 +6344,7 @@ pmap_bootstrap(vaddr_t vstart, vaddr_t v
 	pmap_set_pt_cache_mode(l1pt, (vaddr_t)csrc_pte, nptes);
 	pmap_alloc_specials(&virtual_avail, nptes, &cdstp, &cdst_pte);
 	pmap_set_pt_cache_mode(l1pt, (vaddr_t)cdst_pte, nptes);
+
 	pmap_alloc_specials(&virtual_avail, nptes, &memhook, NULL);
 	if (msgbufaddr == NULL) {
 		pmap_alloc_specials(&virtual_avail,
@@ -6484,6 +6520,17 @@ pmap_init(void)
 	pool_setlowat(&pmap_pv_pool,
 	    (PAGE_SIZE / sizeof(struct pv_entry)) * 2);
 
 #ifdef ARM_MMU_EXTENDED
+	/*
+	 * Initialise the L1 pool and cache.
+	 */
+
+	pool_cache_bootstrap(&pmap_l1tt_cache, L1TT_SIZE, L1TT_SIZE,
+	    0, 0, "l1ttpl", &pmap_l1tt_allocator, IPL_NONE, pmap_l1tt_ctor,
+	    NULL, NULL);
+
+	int error __diagused = pmap_maxproc_set(maxproc);
+	KASSERT(error == 0);
+
 	pmap_tlb_info_evcnt_attach(&pmap_tlb0_info);
 #endif
@@ -6535,6 +6582,76 @@ pmap_bootstrap_pv_page_free(struct pool
 	}
 }
 
+
+#if defined(ARM_MMU_EXTENDED)
+static void *
+pmap_l1tt_alloc(struct pool *pp, int flags)
+{
+	struct pglist plist;
+	vaddr_t va;
+
+	const int waitok = flags & PR_WAITOK;
+
+	int error = uvm_pglistalloc(L1TT_SIZE, 0, -1, L1TT_SIZE, 0, &plist, 1,
+	    waitok);
+	if (error)
+		panic("Cannot allocate L1TT physical pages, %d", error);
+
+	struct vm_page *pg = TAILQ_FIRST(&plist);
+#if !defined( __HAVE_MM_MD_DIRECT_MAPPED_PHYS)
+
+	/* Allocate a L1 translation table VA */
+	va = uvm_km_alloc(kernel_map, L1TT_SIZE, L1TT_SIZE, UVM_KMF_VAONLY);
+	if (va == 0)
+		panic("Cannot allocate L1TT KVA");
+
+	const vaddr_t eva = va + L1TT_SIZE;
+	vaddr_t mva = va;
+	while (pg && mva < eva) {
+		paddr_t pa = VM_PAGE_TO_PHYS(pg);
+
+		pmap_kenter_pa(mva, pa,
+		    VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE|PMAP_PTE);
+
+		mva += PAGE_SIZE;
+		pg = TAILQ_NEXT(pg, pageq.queue);
+	}
+	KASSERTMSG(pg == NULL && mva == eva, "pg %p mva %" PRIxVADDR
+	    " eva %" PRIxVADDR, pg, mva, eva);
+#else
+	bool ok;
+	paddr_t pa = VM_PAGE_TO_PHYS(pg);
+	va = pmap_direct_mapped_phys(pa, &ok, 0);
+	KASSERT(ok);
+	KASSERT(va >= KERNEL_BASE);
+#endif
+
+	return (void *)va;
+}
+
+static void
+pmap_l1tt_free(struct pool *pp, void *v)
+{
+	vaddr_t va = (vaddr_t)v;
+
+#if !defined( __HAVE_MM_MD_DIRECT_MAPPED_PHYS)
+	uvm_km_free(kernel_map, va, L1TT_SIZE, UVM_KMF_WIRED);
+#else
+#if defined(KERNEL_BASE_VOFFSET)
+	paddr_t pa = va - KERNEL_BASE_VOFFSET;
+#else
+	paddr_t pa = va - KERNEL_BASE + physical_start;
+#endif
+	const paddr_t epa = pa + L1TT_SIZE;
+
+	for (; pa < epa; pa += PAGE_SIZE) {
+		struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
+		uvm_pagefree(pg);
+	}
+#endif
+}
+#endif
+
 /*
  * pmap_postinit()
  *
@@ -6650,8 +6767,7 @@ pmap_map_section(vaddr_t l1pt, vaddr_t v
 	switch (cache) {
 	case PTE_NOCACHE:
-	default:
-		fl = 0;
+		fl = pte_l1_s_nocache_mode;
 		break;
 
 	case PTE_CACHE:
@@ -6661,6 +6777,11 @@ pmap_map_section(vaddr_t l1pt, vaddr_t v
 	case PTE_PAGETABLE:
 		fl = pte_l1_s_cache_mode_pt;
 		break;
+
+	case PTE_DEV:
+	default:
+		fl = 0;
+		break;
 	}
 
 	const pd_entry_t npde = L1_S_PROTO | pa |
@@ -6686,8 +6807,7 @@ pmap_map_entry(vaddr_t l1pt, vaddr_t va,
 	switch (cache) {
 	case PTE_NOCACHE:
-	default:
-		npte = 0;
+		npte = pte_l2_s_nocache_mode;
 		break;
 
 	case PTE_CACHE:
@@ -6697,6 +6817,10 @@ pmap_map_entry(vaddr_t l1pt, vaddr_t va,
 	case PTE_PAGETABLE:
 		npte = pte_l2_s_cache_mode_pt;
 		break;
+
+	default:
+		npte = 0;
+		break;
 	}
 
 	if ((pdep[l1slot] & L1_TYPE_MASK) != L1_TYPE_C)
@@ -6765,10 +6889,9 @@ pmap_map_chunk(vaddr_t l1pt, vaddr_t va,
 	switch (cache) {
 	case PTE_NOCACHE:
-	default:
-		f1 = 0;
-		f2l = 0;
-		f2s = 0;
+		f1 = pte_l1_s_nocache_mode;
+		f2l = pte_l2_l_nocache_mode;
+		f2s = pte_l2_s_nocache_mode;
 		break;
 
 	case PTE_CACHE:
@@ -6782,6 +6905,13 @@ pmap_map_chunk(vaddr_t l1pt, vaddr_t va,
 		f2l = pte_l2_l_cache_mode_pt;
 		f2s = pte_l2_s_cache_mode_pt;
 		break;
+
+	case PTE_DEV:
+	default:
+		f1 = 0;
+		f2l = 0;
+		f2s = 0;
+		break;
 	}
 
 	size = resid;
@@ -6993,16 +7123,19 @@ pmap_devmap_find_va(vaddr_t va, vsize_t
  * them (though, they shouldn't).
  */
 
+pt_entry_t	pte_l1_s_nocache_mode;
 pt_entry_t	pte_l1_s_cache_mode;
 pt_entry_t	pte_l1_s_wc_mode;
 pt_entry_t	pte_l1_s_cache_mode_pt;
 pt_entry_t	pte_l1_s_cache_mask;
 
+pt_entry_t	pte_l2_l_nocache_mode;
 pt_entry_t	pte_l2_l_cache_mode;
 pt_entry_t	pte_l2_l_wc_mode;
 pt_entry_t	pte_l2_l_cache_mode_pt;
 pt_entry_t	pte_l2_l_cache_mask;
 
+pt_entry_t	pte_l2_s_nocache_mode;
 pt_entry_t	pte_l2_s_cache_mode;
 pt_entry_t	pte_l2_s_wc_mode;
 pt_entry_t	pte_l2_s_cache_mode_pt;
@@ -7036,14 +7169,17 @@
 void
 pmap_pte_init_generic(void)
 {
 
+	pte_l1_s_nocache_mode = 0;
 	pte_l1_s_cache_mode = L1_S_B|L1_S_C;
 	pte_l1_s_wc_mode = L1_S_B;
 	pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic;
 
+	pte_l2_l_nocache_mode = 0;
 	pte_l2_l_cache_mode = L2_B|L2_C;
 	pte_l2_l_wc_mode = L2_B;
 	pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic;
 
+	pte_l2_s_nocache_mode = 0;
 	pte_l2_s_cache_mode = L2_B|L2_C;
 	pte_l2_s_wc_mode = L2_B;
 	pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic;
@@ -7427,12 +7563,11 @@ pmap_uarea(vaddr_t va)
 
 #if defined(CPU_ARM11MPCORE)
 
-
 void
 pmap_pte_init_arm11mpcore(void)
 {
-	/* cache mode is controlled by 5 bits (B, C, TEX) */
+	/* cache mode is controlled by 5 bits (B, C, TEX[2:0]) */
 	pte_l1_s_cache_mask = L1_S_CACHE_MASK_armv6;
 	pte_l2_l_cache_mask = L2_L_CACHE_MASK_armv6;
 #if defined(ARM11MPCORE_COMPAT_MMU) || defined(ARMV6_EXTENDED_SMALL_PAGE)
@@ -7511,6 +7646,54 @@ pmap_pte_init_arm11mpcore(void)
 
 #endif	/* CPU_ARM11MPCORE */
 
+#if ARM_MMU_V6 == 1
+void
+pmap_pte_init_armv6(void)
+{
+	/*
+	 * The ARMv6-A MMU is mostly compatible with generic. If the
+	 * AP field is zero, that now means "no access" rather than
+	 * read-only. The prototypes are a little different because of
+	 * the XN bit.
+	 */
+	pmap_pte_init_generic();
+
+	pte_l1_s_nocache_mode = L1_S_XS_TEX(1);
+	pte_l2_l_nocache_mode = L2_XS_L_TEX(1);
+	pte_l2_s_nocache_mode = L2_XS_T_TEX(1);
+
+#ifdef ARM11_COMPAT_MMU
+	/* with AP[0..3] */
+	pte_l1_ss_proto = L1_SS_PROTO_armv6;
+#else
+	pte_l1_s_cache_mask = L1_S_CACHE_MASK_armv6n;
+	pte_l2_l_cache_mask = L2_L_CACHE_MASK_armv6n;
+	pte_l2_s_cache_mask = L2_S_CACHE_MASK_armv6n;
+
+	pte_l1_ss_proto = L1_SS_PROTO_armv6;
+	pte_l1_s_proto = L1_S_PROTO_armv6;
+	pte_l1_c_proto = L1_C_PROTO_armv6;
+	pte_l2_s_proto = L2_S_PROTO_armv6n;
+
+	pte_l1_s_prot_u = L1_S_PROT_U_armv6;
+	pte_l1_s_prot_w = L1_S_PROT_W_armv6;
+	pte_l1_s_prot_ro = L1_S_PROT_RO_armv6;
+	pte_l1_s_prot_mask = L1_S_PROT_MASK_armv6;
+
+	pte_l2_l_prot_u = L2_L_PROT_U_armv6n;
+	pte_l2_l_prot_w = L2_L_PROT_W_armv6n;
+	pte_l2_l_prot_ro = L2_L_PROT_RO_armv6n;
+	pte_l2_l_prot_mask = L2_L_PROT_MASK_armv6n;
+
+	pte_l2_s_prot_u = L2_S_PROT_U_armv6n;
+	pte_l2_s_prot_w = L2_S_PROT_W_armv6n;
+	pte_l2_s_prot_ro = L2_S_PROT_RO_armv6n;
+	pte_l2_s_prot_mask = L2_S_PROT_MASK_armv6n;
+
+#endif
+}
+#endif /* ARM_MMU_V6 */
+
 #if ARM_MMU_V7 == 1
 void
 pmap_pte_init_armv7(void)
@@ -7525,6 +7708,10 @@ pmap_pte_init_armv7(void)
 
 	pmap_needs_pte_sync = 1;
 
+	pte_l1_s_nocache_mode = L1_S_XS_TEX(1);
+	pte_l2_l_nocache_mode = L2_XS_L_TEX(1);
+	pte_l2_s_nocache_mode = L2_XS_T_TEX(1);
+
 	pte_l1_s_cache_mask = L1_S_CACHE_MASK_armv7;
 	pte_l2_l_cache_mask = L2_L_CACHE_MASK_armv7;
 	pte_l2_s_cache_mask = L2_S_CACHE_MASK_armv7;
@@ -7676,22 +7863,28 @@ pmap_dump(pmap_t pm)
 					ch = '.';
 				} else {
 					occ--;
-					switch (pte & 0x0c) {
+					switch (pte & 0x4c) {
 					case 0x00:
-						ch = 'D'; /* No cache No buff */
+						ch = 'N'; /* No cache No buff */
 						break;
 					case 0x04:
 						ch = 'B'; /* No cache buff */
 						break;
 					case 0x08:
-						if (pte & 0x40)
-							ch = 'm';
-						else
-							ch = 'C'; /* Cache No buff */
+						ch = 'C'; /* Cache No buff */
 						break;
 					case 0x0c:
 						ch = 'F'; /* Cache Buff */
 						break;
+					case 0x40:
+						ch = 'D';
+						break;
+					case 0x48:
+						ch = 'm'; /* Xscale mini-data */
+						break;
+					default:
+						ch = '?';
+						break;
 					}
 					if ((pte & L2_S_PROT_U) == L2_S_PROT_U)
Index: sys/arch/arm/conf/Makefile.arm
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/conf/Makefile.arm,v
retrieving revision 1.49.4.1
diff -u -p -r1.49.4.1 Makefile.arm
--- sys/arch/arm/conf/Makefile.arm	1 Sep 2019 14:03:02 -0000	1.49.4.1
+++ sys/arch/arm/conf/Makefile.arm	26 Feb 2020 11:59:37 -0000
@@ -53,26 +53,6 @@ CPPFLAGS.cpufunc_asm_armv6.S+= -mcpu=arm
 CPPFLAGS.cpufunc_asm_arm11.S+= -mcpu=arm1136j-s
 CPPFLAGS.cpufunc_asm_xscale.S+= -mcpu=xscale
 
-.if !empty(MACHINE_ARCH:Mearmv6*) || !empty(MACHINE_ARCH:Mearmv7*)
-# XXX
-#
-# Workaround for alignment faults on ARMv6+, at least occur with
-# axe(4) and athn(4) drivers.
-#
-# For ARMv6+, unaligned access is enabled by default.  However, it
-# cannot be used for non-cacheable memory, which is used as DMA
-# buffers.  This results in alignment faults above.  A real fix is
-# to use cacheable memory as DMA buffers.  However, it breaks some
-# drivers, awge(4) and vchiq(4) at least.
-#
-# Until we figure out problems and fix them, we choose a fail-safe
-# workaround here; forbid unaligned memory access for whole kernel.
-# Affects on performance is negligibly small as far as we can see.
-#
-# See PR kern/54486 for more details.
-CFLAGS+=	-mno-unaligned-access
-.endif
-
 ##
 ## (3) libkern and compat
 ##
Index: sys/arch/arm/include/cpu.h
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/include/cpu.h,v
retrieving revision 1.100.4.1
diff -u -p -r1.100.4.1 cpu.h
--- sys/arch/arm/include/cpu.h	23 Oct 2019 19:14:19 -0000	1.100.4.1
+++ sys/arch/arm/include/cpu.h	26 Feb 2020 11:59:37 -0000
@@ -329,6 +329,10 @@ vaddr_t cpu_uarea_alloc_idlelwp(struct c
  */
 void	cpu_attach(device_t, cpuid_t);
 
+#ifdef _ARM_ARCH_6
+int	cpu_maxproc_hook(int);
+#endif
+
 #endif /* !_LOCORE */
 
 #endif /* _KERNEL */
Index: sys/arch/arm/include/types.h
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/include/types.h,v
retrieving revision 1.38
diff -u -p -r1.38 types.h
--- sys/arch/arm/include/types.h	6 Apr 2019 03:06:25 -0000	1.38
+++ sys/arch/arm/include/types.h	26 Feb 2020 11:59:37 -0000
@@ -79,18 +79,19 @@ typedef int		__register_t;
 #define	__SIMPLELOCK_LOCKED	1
 #define	__SIMPLELOCK_UNLOCKED	0
 
-#define	__HAVE_SYSCALL_INTERN
-#define	__HAVE_NEW_STYLE_BUS_H
-#define	__HAVE_MINIMAL_EMUL
-#define	__HAVE_CPU_DATA_FIRST
-#define	__HAVE___LWP_GETPRIVATE_FAST
 #define	__HAVE_COMMON___TLS_GET_ADDR
-#define	__HAVE_TLS_VARIANT_I
+#define	__HAVE_CPU_DATA_FIRST
+#define	__HAVE_MINIMAL_EMUL
+#define	__HAVE_NEW_STYLE_BUS_H
 #define	__HAVE_OLD_DISKLABEL
+#define	__HAVE_SYSCALL_INTERN
+#define	__HAVE_TLS_VARIANT_I
+#define	__HAVE___LWP_GETPRIVATE_FAST
 
 #if defined(__ARM_EABI__) && defined(_ARM_ARCH_6)
 #define	__HAVE_ATOMIC64_OPS
 #endif
 #if defined(_ARM_ARCH_6)
+#define	__HAVE_MAXPROC_HOOK
 #define	__HAVE_UCAS_MP
 #endif
Index: sys/arch/arm/include/arm32/param.h
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/include/arm32/param.h,v
retrieving revision 1.27
diff -u -p -r1.27 param.h
--- sys/arch/arm/include/arm32/param.h	19 Jun 2019 09:53:39 -0000	1.27
+++ sys/arch/arm/include/arm32/param.h	26 Feb 2020 11:59:37 -0000
@@ -46,12 +46,8 @@
  *
 * this file.
  */
 #ifndef PGSHIFT
-#if defined(_ARM_ARCH_6)
-#define PGSHIFT		13	/* LOG2(NBPG) */
-#else
 #define PGSHIFT		12	/* LOG2(NBPG) */
 #endif
-#endif
 #define	NBPG		(1 << PGSHIFT)	/* bytes/page */
 #define	PGOFSET		(NBPG - 1)	/* byte offset into page */
 #define	NPTEPG		(NBPG / sizeof(pt_entry_t))	/* PTEs per Page */
Index: sys/arch/arm/include/arm32/pmap.h
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/include/arm32/pmap.h,v
retrieving revision 1.156
diff -u -p -r1.156 pmap.h
--- sys/arch/arm/include/arm32/pmap.h	18 Oct 2018 09:01:52 -0000	1.156
+++ sys/arch/arm/include/arm32/pmap.h	26 Feb 2020 11:59:37 -0000
@@ -221,7 +221,7 @@ struct pmap_devmap {
 		.pd_pa = DEVMAP_ALIGN(pa),		\
 		.pd_size = DEVMAP_SIZE(sz),		\
 		.pd_prot = VM_PROT_READ|VM_PROT_WRITE,	\
-		.pd_cache = PTE_NOCACHE			\
+		.pd_cache = PTE_DEV			\
 	}
 
 #define	DEVMAP_ENTRY_END	{ 0 }
@@ -302,6 +302,7 @@ extern bool arm_has_tlbiasid_p;	/* also
 #define	PTE_NOCACHE	0
 #define	PTE_CACHE	1
 #define	PTE_PAGETABLE	2
+#define	PTE_DEV		3
 
 /*
  * Flags that indicate attributes of pages or mappings of pages.
@@ -366,6 +367,9 @@ u_int arm32_mmap_flags(paddr_t);
 #define	pmap_mmap_flags(ppn)		arm32_mmap_flags(ppn)
 
 #define	PMAP_PTE			0x10000000 /* kenter_pa */
+#define	PMAP_DEV			0x20000000 /* kenter_pa */
+#define	PMAP_DEV_SO			0x40000000 /* kenter_pa */
+#define	PMAP_DEV_MASK			(PMAP_DEV | PMAP_DEV_SO)
 
 /*
  * Functions that we need to export
@@ -383,6 +387,10 @@ bool	pmap_extract(pmap_t, vaddr_t, paddr
 void	pmap_prefer(vaddr_t, vaddr_t *, int);
 #endif
 
+#ifdef ARM_MMU_EXTENDED
+int	pmap_maxproc_set(int);
+#endif
+
 void	pmap_icache_sync_range(pmap_t, vaddr_t, vaddr_t);
 
 /* Functions we use internally. */
@@ -658,6 +666,9 @@ void	pmap_pte_init_arm11(void);
 #if defined(CPU_ARM11MPCORE)	/* ARM_MMU_V6 */
 void	pmap_pte_init_arm11mpcore(void);
 #endif
+#if ARM_MMU_V6 == 1
+void	pmap_pte_init_armv6(void);
+#endif /* ARM_MMU_V6 */
 #if ARM_MMU_V7 == 1
 void	pmap_pte_init_armv7(void);
 #endif /* ARM_MMU_V7 */
@@ -679,14 +690,13 @@ void	xscale_setup_minidata(vaddr_t, vadd
 void	pmap_uarea(vaddr_t);
 #endif /* ARM_MMU_XSCALE == 1 */
 
-extern pt_entry_t		pte_l1_s_cache_mode;
-extern pt_entry_t		pte_l1_s_cache_mask;
+extern pt_entry_t		pte_l1_s_nocache_mode;
+extern pt_entry_t		pte_l2_l_nocache_mode;
+extern pt_entry_t		pte_l2_s_nocache_mode;
 
+extern pt_entry_t		pte_l1_s_cache_mode;
 extern pt_entry_t		pte_l2_l_cache_mode;
-extern pt_entry_t		pte_l2_l_cache_mask;
-
 extern pt_entry_t		pte_l2_s_cache_mode;
-extern pt_entry_t		pte_l2_s_cache_mask;
 
 extern pt_entry_t		pte_l1_s_cache_mode_pt;
 extern pt_entry_t		pte_l2_l_cache_mode_pt;
@@ -696,6 +706,10 @@ extern pt_entry_t		pte_l1_s_wc_mode;
 extern pt_entry_t		pte_l2_l_wc_mode;
 extern pt_entry_t		pte_l2_s_wc_mode;
 
+extern pt_entry_t		pte_l1_s_cache_mask;
+extern pt_entry_t		pte_l2_l_cache_mask;
+extern pt_entry_t		pte_l2_s_cache_mask;
+
 extern pt_entry_t		pte_l1_s_prot_u;
 extern pt_entry_t		pte_l1_s_prot_w;
 extern pt_entry_t		pte_l1_s_prot_ro;
Index: sys/arch/arm/include/arm32/pte.h
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/include/arm32/pte.h,v
retrieving revision 1.20
diff -u -p -r1.20 pte.h
--- sys/arch/arm/include/arm32/pte.h	19 Jun 2019 09:54:15 -0000	1.20
+++ sys/arch/arm/include/arm32/pte.h	26 Feb 2020 11:59:37 -0000
@@ -141,6 +141,8 @@ typedef uint32_t pt_entry_t;	/* L2 table
 #define	L1_TABLE_SIZE_REAL	0x4000	/* 16K */
 #define	L2_TABLE_SIZE_REAL	0x400	/* 1K */
 
+#define	L1TT_SIZE		0x2000	/* 8K */
+
 /*
  * ARM L1 Descriptors
 */
Index: sys/arch/evbarm/conf/GENERIC
===================================================================
RCS file: /cvsroot/src/sys/arch/evbarm/conf/GENERIC,v
retrieving revision 1.48.2.6
diff -u -p -r1.48.2.6 GENERIC
--- sys/arch/evbarm/conf/GENERIC	27 Jan 2020 07:06:02 -0000	1.48.2.6
+++ sys/arch/evbarm/conf/GENERIC	26 Feb 2020 11:59:37 -0000
@@ -275,14 +275,14 @@ options 	MULTIPROCESSOR
 
 pseudo-device	openfirm	# /dev/openfirm
 
-#options 	DIAGNOSTIC	# internal consistency checks
-#options 	DEBUG
+options 	DIAGNOSTIC	# internal consistency checks
+options 	DEBUG
 #options 	LOCKDEBUG
 #options 	PMAP_DEBUG	# Enable pmap_debug_level code
 options 	MSGBUFSIZE=32768
 
-#options 	VERBOSE_INIT_ARM # verbose bootstrapping messages
+options 	VERBOSE_INIT_ARM # verbose bootstrapping messages
 
 # EARLYCONS is required for early init messages from VERBOSE_INIT_ARM.
@@ -308,7 +308,7 @@ options 	MSGBUFSIZE=32768
 #options 	EARLYCONS=sunxi, CONSADDR=0x01c28000
 
 # TEGRA TK1
-#options 	EARLYCONS=tegra, CONSADDR=0x70006300
+options 	EARLYCONS=tegra, CONSADDR=0x70006300
 
 #options 	EARLYCONS=vexpress, CONSADDR=0x1c090000
 #options 	EARLYCONS=virt, CONSADDR=0x09000000
Index: sys/arch/mips/adm5120/dev/ahci.c
===================================================================
RCS file: /cvsroot/src/sys/arch/mips/adm5120/dev/ahci.c,v
retrieving revision 1.17
diff -u -p -r1.17 ahci.c
--- sys/arch/mips/adm5120/dev/ahci.c	17 Feb 2019 04:17:52 -0000	1.17
+++ sys/arch/mips/adm5120/dev/ahci.c	26 Feb 2020 11:59:42 -0000
@@ -912,6 +912,9 @@ ahci_device_ctrl_start(struct usbd_xfer
 	usb_transfer_complete(xfer);
 	mutex_exit(&sc->sc_lock);
+
+	usb_freemem(&sc->sc_bus, &reqdma);
+
 	return USBD_NORMAL_COMPLETION;
 }
Index: sys/dev/fdt/fdt_regulator.c
===================================================================
RCS file: /cvsroot/src/sys/dev/fdt/fdt_regulator.c,v
retrieving revision 1.8
diff -u -p -r1.8 fdt_regulator.c
--- sys/dev/fdt/fdt_regulator.c	27 May 2019 23:18:33 -0000	1.8
+++ sys/dev/fdt/fdt_regulator.c	26 Feb 2020 11:59:51 -0000
@@ -101,7 +101,7 @@ fdtbus_regulator_acquire(int phandle, co
 
 	error = rc->rc_funcs->acquire(rc->rc_dev);
 	if (error) {
-		aprint_error_dev(rc->rc_dev, "failed to acquire regulator: %d\n", error);
+		aprint_error_dev(rc->rc_dev, "failed to acquire regulator %d: %d\n", regulator_phandle, error);
 		return NULL;
 	}
Index: sys/dev/hdaudio/hdafg.c
===================================================================
RCS file: /cvsroot/src/sys/dev/hdaudio/hdafg.c,v
retrieving revision 1.18
diff -u -p -r1.18 hdafg.c
--- sys/dev/hdaudio/hdafg.c	8 Jun 2019 08:02:38 -0000	1.18
+++ sys/dev/hdaudio/hdafg.c	26 Feb 2020 11:59:51 -0000
@@ -3940,7 +3940,7 @@ hdafg_round_blocksize(void *opaque, int
 {
 	struct hdaudio_audiodev *ad = opaque;
 	struct hdaudio_stream *st;
-	int bufsize;
+	int bufsize, nblksize;
 
 	st = (mode == AUMODE_PLAY) ? ad->ad_playback : ad->ad_capture;
 	if (st == NULL) {
@@ -3958,12 +3958,22 @@ hdafg_round_blocksize(void *opaque, int
 	 * https://www.intel.co.jp/content/www/jp/ja/standards/high-definition-audio-specification.html , p70.
 	 */
+	if (blksize > 8192)
+		blksize = 8192;
+	else if (blksize < 0)
+		blksize = 128;
+
+	for (nblksize = 128; nblksize < blksize; nblksize <<= 1)
+		;
+
 	/* Make sure there are enough BDL descriptors */
 	bufsize = st->st_data.dma_size;
-	if (bufsize > HDAUDIO_BDL_MAX * blksize) {
+	if (bufsize > HDAUDIO_BDL_MAX * nblksize) {
 		blksize = bufsize / HDAUDIO_BDL_MAX;
+		for (nblksize = 128; nblksize < blksize; nblksize <<= 1)
+			;
 	}
-	return blksize;
+	return nblksize;
 }
 
 static int
Index: sys/dev/ic/com.c
===================================================================
RCS file: /cvsroot/src/sys/dev/ic/com.c,v
retrieving revision 1.355
diff -u -p -r1.355 com.c
--- sys/dev/ic/com.c	11 Jan 2019 23:10:40 -0000	1.355
+++ sys/dev/ic/com.c	26 Feb 2020 11:59:52 -0000
@@ -402,9 +402,13 @@ com_enable_debugport(struct com_softc *s
 	sc->sc_ier = IER_ERLS;
 	if (sc->sc_type == COM_TYPE_PXA2x0)
 		sc->sc_ier |= IER_EUART | IER_ERXTOUT;
+#if 1
+	if (sc->sc_type == COM_TYPE_INGENIC)
+#else
 	if (sc->sc_type == COM_TYPE_INGENIC ||
 	    sc->sc_type == COM_TYPE_TEGRA)
-		sc->sc_ier |= IER_ERXTOUT;
+#endif
+		sc->sc_ier |= IER_ERXTOUT;
 	CSR_WRITE_1(&sc->sc_regs, COM_REG_IER, sc->sc_ier);
 	SET(sc->sc_mcr, MCR_DTR | MCR_RTS);
 	CSR_WRITE_1(&sc->sc_regs, COM_REG_MCR, sc->sc_mcr);
@@ -519,11 +523,12 @@ com_attach_subr(struct com_softc *sc)
 		goto fifodelay;
 
 	case COM_TYPE_TEGRA:
-		sc->sc_fifolen = 8;
-		fifo_msg = "Tegra UART, working fifo";
-		SET(sc->sc_hwflags, COM_HW_FIFO);
-		CSR_WRITE_1(regsp, COM_REG_FIFO,
-		    FIFO_ENABLE | FIFO_RCV_RST | FIFO_XMT_RST | FIFO_TRIGGER_1);
+//		sc->sc_fifolen = 8;
+		fifo_msg = "Tegra UART, disabled fifo";
+		SET(sc->sc_hwflags, COM_HW_TXFIFO_DISABLE);
+//		SET(sc->sc_hwflags, COM_HW_FIFO);
+//		CSR_WRITE_1(regsp, COM_REG_FIFO,
+//		    FIFO_ENABLE | FIFO_RCV_RST | FIFO_XMT_RST | FIFO_TRIGGER_1);
 		goto fifodelay;
 
 	case COM_TYPE_BCMAUXUART:
@@ -539,7 +544,7 @@ com_attach_subr(struct com_softc *sc)
 	/* look for a NS 16550AF UART with FIFOs */
 	if (sc->sc_type == COM_TYPE_INGENIC) {
 		CSR_WRITE_1(regsp, COM_REG_FIFO,
-		    FIFO_ENABLE | FIFO_RCV_RST | FIFO_XMT_RST | 
+		    FIFO_ENABLE | FIFO_RCV_RST | FIFO_XMT_RST |
 		    FIFO_TRIGGER_14 | FIFO_UART_ON);
 	} else
 		CSR_WRITE_1(regsp, COM_REG_FIFO,
@@ -1547,8 +1552,8 @@ comparam(struct tty *tp, struct termios
 	 */
 	if (sc->sc_type == COM_TYPE_HAYESP) {
 		sc->sc_fifo = FIFO_DMA_MODE | FIFO_ENABLE | FIFO_TRIGGER_8;
-	} else if (sc->sc_type == COM_TYPE_TEGRA) {
-		sc->sc_fifo = FIFO_ENABLE | FIFO_TRIGGER_1;
+//	} else if (sc->sc_type == COM_TYPE_TEGRA) {
+//		sc->sc_fifo = FIFO_ENABLE | FIFO_TRIGGER_1;
 	} else if (ISSET(sc->sc_hwflags, COM_HW_FIFO)) {
 		if (t->c_ospeed <= 1200)
 			sc->sc_fifo = FIFO_ENABLE | FIFO_TRIGGER_1;
@@ -1986,8 +1991,12 @@ com_rxsoft(struct com_softc *sc, struct
 			SET(sc->sc_ier, IER_ERXRDY);
 			if (sc->sc_type == COM_TYPE_PXA2x0)
 				SET(sc->sc_ier, IER_ERXTOUT);
-			if (sc->sc_type == COM_TYPE_INGENIC ||
+#if 1
+			if (sc->sc_type == COM_TYPE_INGENIC)
+#else
+			if (sc->sc_type == COM_TYPE_INGENIC ||
 			    sc->sc_type == COM_TYPE_TEGRA)
+#endif
 				SET(sc->sc_ier, IER_ERXTOUT);
 			CSR_WRITE_1(&sc->sc_regs, COM_REG_IER,
@@ -2111,8 +2120,7 @@ comintr(void *arg)
 	}
 
 	/* DesignWare APB UART BUSY interrupt */
-	if (sc->sc_type == COM_TYPE_DW_APB &&
-	    (iir & IIR_BUSY) == IIR_BUSY) {
+	if (sc->sc_type == COM_TYPE_DW_APB && (iir & IIR_BUSY) == IIR_BUSY) {
 		if ((CSR_READ_1(regsp, COM_REG_USR) & 0x1) != 0) {
 			CSR_WRITE_1(regsp, COM_REG_HALT, HALT_CHCFG_EN);
 			CSR_WRITE_1(regsp, COM_REG_LCR, sc->sc_lcr | LCR_DLAB);
@@ -2226,7 +2234,7 @@ again:	do {
 			CLR(sc->sc_ier, IER_ERXRDY|IER_ERXTOUT);
 			break;
 		case COM_TYPE_INGENIC:
-		case COM_TYPE_TEGRA:
+//		case COM_TYPE_TEGRA:
 			CLR(sc->sc_ier, IER_ERXRDY | IER_ERXTOUT);
 			break;
@@ -2321,7 +2329,10 @@ again:	do {
 				sc->sc_tbc -= n;
 				sc->sc_tba += n;
 			} else {
-			/* Disable transmit completion interrupts if necessary. */
+			/*
+			 * Disable transmit completion interrupts if
+			 * necessary.
+			 */
 			if (ISSET(sc->sc_ier, IER_ETXRDY)) {
 				CLR(sc->sc_ier, IER_ETXRDY);
 				CSR_WRITE_1(regsp, COM_REG_IER, sc->sc_ier);
@@ -2449,7 +2460,7 @@ cominit(struct com_regs *regsp, int rate
 		CSR_WRITE_2(regsp, COM_REG_DLBL, rate);
 	} else {
 		/* no EFR on alchemy */
-		if ((type != COM_TYPE_16550_NOERS) && 
+		if ((type != COM_TYPE_16550_NOERS) &&
 		    (type != COM_TYPE_INGENIC)) {
 			CSR_WRITE_1(regsp, COM_REG_LCR, LCR_EERS);
 			CSR_WRITE_1(regsp, COM_REG_EFR, 0);
Index: sys/dev/usb/ehci.c
===================================================================
RCS file: /cvsroot/src/sys/dev/usb/ehci.c,v
retrieving revision 1.267.2.1
diff -u -p -r1.267.2.1 ehci.c
--- sys/dev/usb/ehci.c	10 Feb 2020 18:50:29 -0000	1.267.2.1
+++ sys/dev/usb/ehci.c	26 Feb 2020 11:59:53 -0000
@@ -3813,7 +3813,7 @@ Static void
 ehci_device_ctrl_close(struct usbd_pipe *pipe)
 {
 	ehci_softc_t *sc = EHCI_PIPE2SC(pipe);
-	/*struct ehci_pipe *epipe = EHCI_PIPE2EPIPE(pipe);*/
+	struct ehci_pipe * const epipe = EHCI_PIPE2EPIPE(pipe);
 
 	EHCIHIST_FUNC(); EHCIHIST_CALLED();
@@ -3822,6 +3822,8 @@ ehci_device_ctrl_close(struct usbd_pipe
 	DPRINTF("pipe=%#jx", (uintptr_t)pipe, 0, 0, 0);
 
 	ehci_close_pipe(pipe, sc->sc_async_head);
+
+	usb_freemem(&sc->sc_bus, &epipe->ctrl.reqdma);
 }
 
 /*
Index: sys/dev/usb/ohci.c
===================================================================
RCS file: /cvsroot/src/sys/dev/usb/ohci.c,v
retrieving revision 1.289.4.3
diff -u -p -r1.289.4.3 ohci.c
--- sys/dev/usb/ohci.c	14 Dec 2019 12:35:58 -0000	1.289.4.3
+++ sys/dev/usb/ohci.c	26 Feb 2020 11:59:54 -0000
@@ -2915,6 +2915,8 @@ ohci_device_ctrl_close(struct usbd_pipe
 	DPRINTF("pipe=%#jx", (uintptr_t)pipe, 0, 0, 0);
 	ohci_close_pipe(pipe, sc->sc_ctrl_head);
 	ohci_free_std_locked(sc, opipe->tail.td);
+
+	usb_freemem(&sc->sc_bus, &opipe->ctrl.reqdma);
 }
 
 /************************/
Index: sys/dev/usb/uhci.c
===================================================================
RCS file: /cvsroot/src/sys/dev/usb/uhci.c,v
retrieving revision 1.288
diff -u -p -r1.288 uhci.c
--- sys/dev/usb/uhci.c	17 Feb 2019 04:17:52 -0000	1.288
+++ sys/dev/usb/uhci.c	26 Feb 2020 11:59:55 -0000
@@ -2848,6 +2848,7 @@ uhci_device_ctrl_close(struct usbd_pipe
 
 	uhci_free_std_locked(sc, upipe->ctrl.setup);
 	uhci_free_std_locked(sc, upipe->ctrl.stat);
+	usb_freemem(&sc->sc_bus, &upipe->ctrl.reqdma);
 }
 
 /* Abort a device interrupt request. */
Index: sys/external/bsd/dwc2/dwc2.c
===================================================================
RCS file: /cvsroot/src/sys/external/bsd/dwc2/dwc2.c,v
retrieving revision 1.59.4.1
diff -u -p -r1.59.4.1 dwc2.c
--- sys/external/bsd/dwc2/dwc2.c	9 Dec 2019 13:06:38 -0000	1.59.4.1
+++ sys/external/bsd/dwc2/dwc2.c	26 Feb 2020 11:59:58 -0000
@@ -767,9 +767,13 @@ dwc2_device_ctrl_abort(struct usbd_xfer
 Static void
 dwc2_device_ctrl_close(struct usbd_pipe *pipe)
 {
+	struct dwc2_softc * const sc = DWC2_PIPE2SC(pipe);
+	struct dwc2_pipe * const dpipe = DWC2_PIPE2DPIPE(pipe);
 
 	DPRINTF("pipe=%p\n", pipe);
 	dwc2_close_pipe(pipe);
+
+	usb_freemem(&sc->sc_bus, &dpipe->req_dma);
 }
 
 Static void
Index: sys/kern/init_sysctl.c
===================================================================
RCS file: /cvsroot/src/sys/kern/init_sysctl.c,v
retrieving revision 1.222
diff -u -p -r1.222 init_sysctl.c
--- sys/kern/init_sysctl.c	15 Jan 2019 07:11:23 -0000	1.222
+++ sys/kern/init_sysctl.c	26 Feb 2020 12:00:03 -0000
@@ -858,6 +858,13 @@ sysctl_kern_maxproc(SYSCTLFN_ARGS)
 	if (nmaxproc > cpu_maxproc())
 		return (EINVAL);
 #endif
+	error = 0;
+#ifdef __HAVE_MAXPROC_HOOK
+	error = cpu_maxproc_hook(nmaxproc);
+#endif
+	if (error)
+		return error;
+
 	maxproc = nmaxproc;
 
 	return (0);
Index: sys/uvm/uvm_pglist.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_pglist.c,v
retrieving revision 1.72
diff -u -p -r1.72 uvm_pglist.c
--- sys/uvm/uvm_pglist.c	13 Nov 2018 10:31:01 -0000	1.72
+++ sys/uvm/uvm_pglist.c	26 Feb 2020 12:00:09 -0000
@@ -305,19 +305,22 @@ uvm_pglistalloc_c_ps(uvm_physseg_t psi,
 
 static int
 uvm_pglistalloc_contig(int num, paddr_t low, paddr_t high, paddr_t alignment,
-    paddr_t boundary, struct pglist *rlist)
+    paddr_t boundary, struct pglist *rlist, int waitok)
 {
 	int fl;
 	int error;
+	int count = 0;
 	uvm_physseg_t psi;
 
 	/* Default to "lose". */
 	error = ENOMEM;
+again:
 
 	/*
 	 * Block all memory allocation and lock the free list.
 	 */
 	mutex_spin_enter(&uvm_fpageqlock);
+	count++;
 
 	/* Are there even any free pages? */
 	if (uvmexp.free <= (uvmexp.reserve_pagedaemon + uvmexp.reserve_kernel))
@@ -346,7 +349,6 @@ uvm_pglistalloc_contig(int num, paddr_t
 			}
 		}
 	}
-
 out:
 	/*
 	 * check to see if we need to generate some free pages waking
@@ -355,6 +357,19 @@ out:
 	uvm_kick_pdaemon();
 
 	mutex_spin_exit(&uvm_fpageqlock);
+
+	if (error) {
+		if (waitok) {
+			/* XXX perhaps some time limitation? */
+#ifdef DEBUG
+			if (count == 1)
+				printf("pglistallocc waiting\n");
+#endif
+			uvm_wait("pglallocc");
+			goto again;
+		} else
+			uvm_pglistfree(rlist);
+	}
 
 	return (error);
 }
@@ -406,16 +421,14 @@ again:
 			goto again;
 		}
 #if defined(DEBUG)
-		{
-			paddr_t cidx = 0;
-			const uvm_physseg_t bank = uvm_physseg_find(candidate, &cidx);
-			KDASSERTMSG(bank == psi,
-			    "uvm_physseg_find(%#x) (%"PRIxPHYSSEG ") != psi %"PRIxPHYSSEG,
-			    candidate, bank, psi);
-			KDASSERTMSG(cidx == candidate - uvm_physseg_get_start(psi),
-			    "uvm_physseg_find(%#x): %#"PRIxPADDR" != off %"PRIxPADDR,
-			    candidate, cidx, candidate - uvm_physseg_get_start(psi));
-		}
+		paddr_t cidx = 0;
+		const uvm_physseg_t bank = uvm_physseg_find(candidate, &cidx);
+		KDASSERTMSG(bank == psi,
+		    "uvm_physseg_find(%#x) (%"PRIxPHYSSEG ") != psi %"PRIxPHYSSEG,
+		    candidate, bank, psi);
+		KDASSERTMSG(cidx == candidate - uvm_physseg_get_start(psi),
+		    "uvm_physseg_find(%#x): %#"PRIxPADDR" != off %"PRIxPADDR,
+		    candidate, cidx, candidate - uvm_physseg_get_start(psi));
 #endif
 		if (VM_PAGE_IS_FREE(pg) == 0)
 			continue;
@@ -502,9 +515,9 @@ out:
 			/* XXX perhaps some time limitation? */
 #ifdef DEBUG
 			if (count == 1)
-				printf("pglistalloc waiting\n");
+				printf("pglistallocs waiting\n");
 #endif
-			uvm_wait("pglalloc");
+			uvm_wait("pglallocs");
 			goto again;
 		} else
 			uvm_pglistfree(rlist);
@@ -543,7 +556,7 @@ uvm_pglistalloc(psize_t size, paddr_t lo
 	if ((nsegs < size >> PAGE_SHIFT) || (alignment != PAGE_SIZE) ||
 	    (boundary != 0))
 		res = uvm_pglistalloc_contig(num, low, high, alignment,
-		    boundary, rlist);
+		    boundary, rlist, waitok);
 	else
 		res = uvm_pglistalloc_simple(num, low, high, rlist, waitok);
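
Note (not part of the diff): the pmap.c hunks above replace the old single-page per-pmap L1 allocation with a pool_cache(9) backed by a custom pool_allocator, so every ARMv6/v7 pmap gets a naturally aligned L1TT_SIZE (8 KiB) translation table, and the new maxproc hook caps the cache at kern.maxproc. Below is a minimal, hedged sketch of that pattern using the standard pool_cache(9) calls seen in the patch; the names l1tt_cache, l1tt_allocator, l1tt_page_alloc/l1tt_page_free, l1tt_ctor, l1tt_init and l1tt_get are illustrative only, and the backing allocator bodies (uvm_pglistalloc-based in the patch's pmap_l1tt_alloc()/pmap_l1tt_free()) are elided.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>

#define EX_L1TT_SIZE	0x2000		/* 8 KiB, mirrors L1TT_SIZE in pte.h above */

static struct pool_cache l1tt_cache;	/* hypothetical example object */

/* Backing allocator: hands out EX_L1TT_SIZE-aligned, physically contiguous chunks. */
static void *	l1tt_page_alloc(struct pool *, int);	/* cf. pmap_l1tt_alloc() in the patch */
static void	l1tt_page_free(struct pool *, void *);	/* cf. pmap_l1tt_free() in the patch */

static struct pool_allocator l1tt_allocator = {
	.pa_alloc = l1tt_page_alloc,
	.pa_free = l1tt_page_free,
	.pa_pagesz = EX_L1TT_SIZE,	/* one backing "page" == one whole table */
};

static int
l1tt_ctor(void *arg, void *obj, int flags)
{
	/* Runs once per freshly constructed object: hand out zeroed tables. */
	memset(obj, 0, EX_L1TT_SIZE);
	return 0;
}

static void
l1tt_init(int nmaxproc)
{
	/* Objects are EX_L1TT_SIZE bytes and EX_L1TT_SIZE aligned. */
	pool_cache_bootstrap(&l1tt_cache, EX_L1TT_SIZE, EX_L1TT_SIZE, 0, 0,
	    "l1ttpl", &l1tt_allocator, IPL_NONE, l1tt_ctor, NULL, NULL);

	/* At most one table per process; rate-limit the warning to once a minute. */
	pool_cache_sethardlimit(&l1tt_cache, nmaxproc,
	    "WARNING: l1ttpool limit reached; increase kern.maxproc", 60);
}

static void *
l1tt_get(paddr_t *pap)
{
	/* Returns the table VA and fills in its PA, as pmap_alloc_l1() needs both. */
	return pool_cache_get_paddr(&l1tt_cache, PR_WAITOK, pap);
}

Raising kern.maxproc then only has to call pool_cache_sethardlimit() again (the patch routes this through cpu_maxproc_hook() -> pmap_maxproc_set()), which is why sysctl_kern_maxproc() gains the __HAVE_MAXPROC_HOOK call.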