Index: sys/uvm/uvm_glue.c
===================================================================
RCS file: /home/netbsd/src/sys/uvm/uvm_glue.c,v
retrieving revision 1.176
diff -p -u -r1.176 uvm_glue.c
--- sys/uvm/uvm_glue.c	12 Jan 2020 12:55:03 -0000	1.176
+++ sys/uvm/uvm_glue.c	18 Feb 2020 02:37:59 -0000
@@ -262,12 +262,11 @@ uarea_poolpage_alloc(struct pool *pp, in
 	}
 #endif
 #if defined(__HAVE_CPU_UAREA_ROUTINES)
-	void *va = cpu_uarea_alloc(false);
-	if (va)
-		return (void *)va;
-#endif
+	return cpu_uarea_alloc(false);
+#else
 	return (void *)uvm_km_alloc(kernel_map, pp->pr_alloc->pa_pagesz,
 	    USPACE_ALIGN, UVM_KMF_WIRED | UVM_KMF_WAITVA);
+#endif
 }
 
 static void
@@ -284,11 +283,12 @@ uarea_poolpage_free(struct pool *pp, voi
 	}
 #endif
 #if defined(__HAVE_CPU_UAREA_ROUTINES)
-	if (cpu_uarea_free(addr))
-		return;
-#endif
+	bool used __diagused = cpu_uarea_free(addr);
+	KASSERT(used);
+#else
 	uvm_km_free(kernel_map, (vaddr_t)addr, pp->pr_alloc->pa_pagesz,
 	    UVM_KMF_WIRED);
+#endif
 }
 
 static struct pool_allocator uvm_uarea_allocator = {
@@ -332,6 +332,13 @@ void
 uvm_uarea_init(void)
 {
 	int flags = PR_NOTOUCH;
+#if defined(__HAVE_CPU_UAREA_ROUTINES)
+	/*
+	 * Let pool subsystem know that cpu_uarea_alloc() may fail even for
+	 * PR_WAITOK, when no contiguous pages available.
+	 */
+	flags |= PR_MAYFAIL;
+#endif
 
 	/*
 	 * specify PR_NOALIGN unless the alignment provided by
Index: sys/uvm/uvm_pglist.c
===================================================================
RCS file: /home/netbsd/src/sys/uvm/uvm_pglist.c,v
retrieving revision 1.80
diff -p -u -r1.80 uvm_pglist.c
--- sys/uvm/uvm_pglist.c	20 Feb 2020 04:54:47 -0000	1.80
+++ sys/uvm/uvm_pglist.c	20 Feb 2020 06:33:36 -0000
@@ -310,8 +310,10 @@ uvm_pglistalloc_contig(int num, paddr_t 
 	/* Are there even any free pages? */
 	if (uvm_availmem() <=
-	    (uvmexp.reserve_pagedaemon + uvmexp.reserve_kernel))
+	    (uvmexp.reserve_pagedaemon + uvmexp.reserve_kernel)) {
+		printf("%s: not enough memory\n", __func__);
 		goto out;
+	}
 
 	for (fl = 0; fl < VM_NFREELIST; fl++) {
 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
@@ -337,6 +339,8 @@ uvm_pglistalloc_contig(int num, paddr_t 
 		}
 	}
 
+	printf("%s: no contiguous pages\n", __func__);
+
 out:
 	/*
 	 * check to see if we need to generate some free pages waking
Index: sys/kern/subr_pool.c
===================================================================
RCS file: /home/netbsd/src/sys/kern/subr_pool.c,v
retrieving revision 1.266
diff -p -u -r1.266 subr_pool.c
--- sys/kern/subr_pool.c	8 Feb 2020 07:07:07 -0000	1.266
+++ sys/kern/subr_pool.c	18 Feb 2020 02:37:59 -0000
@@ -1093,7 +1093,8 @@ pool_get(struct pool *pp, int flags)
 			pp->pr_nfail++;
 
 			mutex_exit(&pp->pr_lock);
-			KASSERT((flags & (PR_NOWAIT|PR_LIMITFAIL)) != 0);
+			KASSERT((flags & (PR_NOWAIT|PR_LIMITFAIL)) != 0 ||
+			    (pp->pr_roflags & PR_MAYFAIL) != 0);
 			return NULL;
 		}
 
@@ -1136,7 +1137,8 @@ pool_get(struct pool *pp, int flags)
 
 			pp->pr_nfail++;
 			mutex_exit(&pp->pr_lock);
-			KASSERT((flags & (PR_NOWAIT|PR_LIMITFAIL)) != 0);
+			KASSERT((flags & (PR_NOWAIT|PR_LIMITFAIL)) != 0 ||
+			    (pp->pr_roflags & PR_MAYFAIL) != 0);
 			return NULL;
 		}
 
@@ -2498,7 +2500,8 @@ pool_cache_get_slow(pool_cache_cpu_t *cc
 	object = pool_get(&pc->pc_pool, flags);
 	*objectp = object;
 	if (__predict_false(object == NULL)) {
-		KASSERT((flags & (PR_NOWAIT|PR_LIMITFAIL)) != 0);
+		KASSERT((flags & (PR_NOWAIT|PR_LIMITFAIL)) != 0 ||
+		    (pc->pc_pool.pr_roflags & PR_MAYFAIL) != 0);
 		return false;
 	}
 
Index: sys/sys/pool.h
===================================================================
RCS file: /home/netbsd/src/sys/sys/pool.h,v
retrieving revision 1.89
diff -p -u -r1.89 pool.h
--- sys/sys/pool.h	9 May 2019 08:16:15 -0000	1.89
+++ sys/sys/pool.h	21 Feb 2020 09:47:43 -0000
@@ -161,6 +161,7 @@ struct pool {
 #define PR_GROWINGNOWAIT 0x4000	/* pool_grow in progress by PR_NOWAIT alloc */
 #define PR_ZERO		0x8000	/* zero data before returning */
 #define PR_USEBMAP	0x10000	/* use a bitmap to manage freed items */
+#define PR_MAYFAIL	0x20000	/* allocator may fail even if WAITOK */
 
 /*
  * `pr_lock' protects the pool's data structures when removing