diff --git a/sys/uvm/uvm_extern.h b/sys/uvm/uvm_extern.h
index 40bc23157934..8568f8e3dc53 100644
--- a/sys/uvm/uvm_extern.h
+++ b/sys/uvm/uvm_extern.h
@@ -699,10 +699,14 @@
 int			uvm_io(struct vm_map *, struct uio *, int);
 /* uvm_km.c */
 vaddr_t			uvm_km_alloc(struct vm_map *, vsize_t, vsize_t,
 			    uvm_flag_t);
+int			uvm_km_allocjit(struct vm_map *, vaddr_t *, vaddr_t *,
+			    vsize_t, vsize_t, uvm_flag_t);
 int			uvm_km_protect(struct vm_map *, vaddr_t, vsize_t,
 			    vm_prot_t);
 void			uvm_km_free(struct vm_map *, vaddr_t, vsize_t,
 			    uvm_flag_t);
+void			uvm_km_freejit(struct vm_map *, vaddr_t, vaddr_t,
+			    vsize_t, uvm_flag_t);
 struct vm_map		*uvm_km_suballoc(struct vm_map *, vaddr_t *,
 			    vaddr_t *, vsize_t, int, bool,
diff --git a/sys/uvm/uvm_km.c b/sys/uvm/uvm_km.c
index e5a06fec87ea..5d6dd2c8abad 100644
--- a/sys/uvm/uvm_km.c
+++ b/sys/uvm/uvm_km.c
@@ -721,6 +721,72 @@
 	return(kva);
 }
 
+/*
+ * uvm_km_allocjit: allocate an area of kernel JIT memory.
+ *
+ * => NOTE: we can fail even if we can wait if there is not enough
+ *	free VM space in the map... caller should be prepared to handle
+ *	this case.
+ * => On success, return 0 with the writable KVA of the allocated
+ *	memory in *rdata and the executable KVA in *rcode.  On failure,
+ *	return -1.
+ */
+
+int
+uvm_km_allocjit(struct vm_map *map, vaddr_t *rdata, vaddr_t *rcode,
+    vsize_t size, vsize_t align, uvm_flag_t flags)
+{
+	struct pmap *pmap;
+	vaddr_t data, code;
+	vaddr_t kva, end;
+	paddr_t pa;
+
+	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED);
+	KASSERT(map == kernel_map);
+
+	/* Writable mapping, backed by wired pages. */
+	data = uvm_km_alloc(map, size, align, flags);
+	if (data == 0)
+		return -1;
+
+	/* Executable alias: VA only, the mappings are entered by hand. */
+	code = uvm_km_alloc(map, size, align, UVM_KMF_VAONLY);
+	if (code == 0) {
+		uvm_km_free(map, data, size, UVM_KMF_WIRED);
+		return -1;
+	}
+
+	/*
+	 * Enter an RX mapping at 'code' for each page backing the
+	 * writable 'data' mapping.
+	 */
+	pmap = map->pmap;
+	for (kva = code, end = code + size; kva < end; kva += PAGE_SIZE) {
+		if (!pmap_extract(pmap, data + (kva - code), &pa)) {
+			goto fail;
+		}
+		pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_EXECUTE,
+		    PMAP_WIRED);
+	}
+	pmap_update(pmap);
+
+	if (rdata)
+		*rdata = data;
+	if (rcode)
+		*rcode = code;
+	return 0;
+
+fail:
+	/* Undo the partial RX mappings, then release both areas. */
+	if (kva > code) {
+		pmap_kremove(code, kva - code);
+		pmap_update(pmap);
+	}
+	uvm_km_free(map, code, size, UVM_KMF_VAONLY);
+	uvm_km_free(map, data, size, UVM_KMF_WIRED);
+	return -1;
+}
+
 /*
  * uvm_km_protect: change the protection of an allocated area
  */
@@ -766,6 +832,26 @@
 	uvm_unmap1(map, addr, addr + size, UVM_FLAG_VAONLY);
 }
 
+/*
+ * uvm_km_freejit: free a JIT area of kernel memory
+ */
+
+void
+uvm_km_freejit(struct vm_map *map, vaddr_t data, vaddr_t code, vsize_t size,
+    uvm_flag_t flags)
+{
+
+	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED);
+	KASSERT(map == kernel_map);
+
+	/* Remove the hand-entered RX alias mappings first. */
+	pmap_kremove(code, size);
+	pmap_update(map->pmap);
+
+	uvm_km_free(map, code, size, UVM_KMF_VAONLY);
+	uvm_km_free(map, data, size, UVM_KMF_WIRED);
+}
+
 /* Sanity; must specify both or none. */
 #if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
     (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
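
For reviewers, a minimal sketch of how a caller would be expected to drive the
pair. This is not part of the patch: the function name jit_run(), the ENOMEM
policy, and the icache-sync remark are illustrative assumptions; only
uvm_km_allocjit()/uvm_km_freejit() come from the change above.

#include <sys/param.h>
#include <sys/systm.h>
#include <uvm/uvm_extern.h>

/* Hypothetical consumer: copy a code image in, run it, tear it down. */
static int
jit_run(const uint8_t *image, size_t len)
{
	vaddr_t data, code;
	vsize_t size = round_page(len);
	int (*func)(void);

	if (uvm_km_allocjit(kernel_map, &data, &code, size, 0,
	    UVM_KMF_WIRED) != 0)
		return ENOMEM;

	/* Emit through the writable alias... */
	memcpy((void *)data, image, len);
	/* (an icache sync may be needed here on some architectures) */

	/* ...and execute through the read/execute alias. */
	func = (int (*)(void))code;
	(*func)();

	uvm_km_freejit(kernel_map, data, code, size, UVM_KMF_WIRED);
	return 0;
}

The point of the split is W^X: no kernel VA is ever mapped writable and
executable at the same time, yet both aliases reference the same wired
physical pages.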