diff -r 461e169ca34e sys/uvm/uvm_map.c
--- a/sys/uvm/uvm_map.c	Thu Apr 29 09:27:29 2021 +0000
+++ b/sys/uvm/uvm_map.c	Sat May 15 01:06:25 2021 +0000
@@ -5217,6 +5217,7 @@ fill_vmentries(struct lwp *l, pid_t pid,
 	size_t *oldlenp)
 {
 	int error;
+	struct lwp *l1 = NULL;
 	struct proc *p;
 	struct kinfo_vmentry *vme;
 	struct vmspace *vm;
@@ -5224,6 +5225,7 @@ fill_vmentries(struct lwp *l, pid_t pid,
 	struct vm_map_entry *entry;
 	char *dp;
 	size_t count, vmesize;
+	bool unlock = false;
 
 	if (elem_size == 0 || elem_size > 2 * sizeof(*vme))
 		return EINVAL;
@@ -5238,15 +5240,47 @@ fill_vmentries(struct lwp *l, pid_t pid,
 	} else
 		vmesize = 0;
 
+	/*
+	 * Find the process by pid and lock it -- unless pid is -1, in
+	 * which case we get curproc, unlocked.
+	 */
 	if ((error = proc_find_locked(l, &p, pid)) != 0)
 		return error;
-
+	KASSERT(pid == -1 || mutex_owned(p->p_lock));
+	unlock = true;
+
+	/*
+	 * Initialize the loop state now, so we can back out what we've
+	 * done if need be.
+	 */
+	vm = NULL;
 	vme = NULL;
 	count = 0;
 
+	/* Get a reference to the VM space. */
	if ((error = proc_vmspace_getref(p, &vm)) != 0)
 		goto out;
 
+	/*
+	 * Find a non-zombie lwp in the process so we can grab a
+	 * reference to it and release p->p_lock while we work -- that
+	 * way it won't go away but we can reach into subsystems that
+	 * need to take the lock (and we don't hoard the lock for long
+	 * durations).
+	 */
+	LIST_FOREACH(l1, &p->p_lwps, l_sibling) {
+		if (l1->l_stat != LSZOMB)
+			break;
+	}
+	if (l1 == NULL) {
+		error = ESRCH;
+		goto out;
+	}
+	lwp_addref(l1);
+	if (pid != -1)
+		mutex_exit(p->p_lock);
+	unlock = false;
+
 	map = &vm->vm_map;
 	vm_map_lock_read(map);
 
@@ -5263,11 +5297,15 @@ fill_vmentries(struct lwp *l, pid_t pid,
 		}
 		count++;
 	}
+
 	vm_map_unlock_read(map);
-	uvmspace_free(vm);
 
 out:
-	if (pid != -1)
+	if (vm != NULL)
+		uvmspace_free(vm);
+	if (l1 != NULL)
+		lwp_delref(l1);
+	if (unlock && pid != -1)
 		mutex_exit(p->p_lock);
 	if (error == 0) {
 		const u_int esize = uimin(sizeof(*vme), elem_size);
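
For readers less familiar with the idiom, here is a minimal userspace sketch (plain C with pthreads, not kernel code) of the pattern the patch applies in fill_vmentries(): look the object up under its lock, pin it with a reference so it cannot go away, drop the lock before the slow part so the lock is not hoarded and so the slow part may itself take locks, and back out through a single cleanup label that releases only what was actually acquired. All names below (struct registry, slow_query, obj_delref, and so on) are made up for illustration; they are not NetBSD APIs.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* One object in a registry whose membership is protected by a single lock. */
struct object {
	int	id;
	int	refcnt;		/* pins the object, like an lwp reference */
	int	data;
};

struct registry {
	pthread_mutex_t	lock;	/* stands in for p->p_lock */
	struct object	obj;	/* a single object, for brevity */
};

/* Look up an object by id; the caller must hold r->lock. */
static struct object *
lookup(struct registry *r, int id)
{

	return (r->obj.id == id) ? &r->obj : NULL;
}

/* Drop a reference; takes the lock itself, roughly like lwp_delref(). */
static void
obj_delref(struct registry *r, struct object *o)
{

	pthread_mutex_lock(&r->lock);
	o->refcnt--;
	pthread_mutex_unlock(&r->lock);
}

static int
slow_query(struct registry *r, int id, int *datap)
{
	struct object *o = NULL;
	bool unlock = false;
	int error = 0;

	/* Find the object by id and lock the registry. */
	pthread_mutex_lock(&r->lock);
	unlock = true;

	o = lookup(r, id);
	if (o == NULL) {
		error = ESRCH;
		goto out;
	}

	/* Pin it so it can't go away, then drop the lock while we work. */
	o->refcnt++;
	pthread_mutex_unlock(&r->lock);
	unlock = false;

	/* The slow part runs without the registry lock held. */
	*datap = o->data;

out:
	/* At most one of these two conditions holds on any path here. */
	if (o != NULL)
		obj_delref(r, o);
	if (unlock)
		pthread_mutex_unlock(&r->lock);
	return error;
}

int
main(void)
{
	static struct registry r = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.obj = { .id = 1, .refcnt = 0, .data = 42 },
	};
	int data, error;

	error = slow_query(&r, 1, &data);
	if (error == 0)
		printf("data = %d\n", data);
	return error;
}

As in the patch's out: path, the cleanup releases only what was acquired: if we failed before pinning, the reference was never taken and only the lock needs dropping; once pinned, the lock has already been released, so only the reference remains to be put back.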