commit fa8bd269451d5351642d3b96cd639d2d5d7838d0
Author: Kamil Rytarowski
Date:   Sat Jun 13 17:19:37 2020 +0200

    Add support for RUMP_USE_LIBC_ALLOCATORS

    This option wires the kernel allocators directly to the libc functions.
    This is useful for sanitizers, which perform fine-grained checks on
    allocated chunks.

    Add a new Makefile option, RUMP_ALLOCATORS, that can be set to either
    "emulated" (the current, unchanged default) or "libc".

diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
index dabc4a045a11..66d044374013 100644
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -78,6 +78,10 @@ __KERNEL_RCSID(0, "$NetBSD: kern_malloc.c,v 1.158 2019/11/14 16:23:52 maxv Exp $
 #include
 #include
 
+#ifdef _RUMPKERNEL
+#include "rump_private.h"
+#endif
+
 /*
  * Built-in malloc types.  Note: ought to be removed.
  */
@@ -104,6 +108,20 @@ struct malloc_header {
 void *
 kern_malloc(unsigned long reqsize, int flags)
 {
+#ifdef RUMP_USE_LIBC_ALLOCATORS
+	struct malloc_header *mh;
+
+	reqsize += sizeof(struct malloc_header);
+	if (rumpuser_malloc(reqsize, 0, (void **)&mh))
+		return NULL;
+
+	if (flags & M_ZERO)
+		memset(mh, 0, reqsize);
+
+	mh->mh_size = reqsize;
+
+	return mh + 1;
+#else
 	const int kmflags = (flags & M_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
 #ifdef KASAN
 	const size_t origsize = reqsize;
@@ -146,11 +164,19 @@ kern_malloc(unsigned long reqsize, int flags)
 	kasan_mark(mh, origsize, size, KASAN_MALLOC_REDZONE);
 
 	return mh;
+#endif
 }
 
 void
 kern_free(void *addr)
 {
+#ifdef RUMP_USE_LIBC_ALLOCATORS
+	struct malloc_header *mh;
+
+	mh = (struct malloc_header *)addr - 1;
+
+	rumpuser_free(mh, mh->mh_size);
+#else
 	struct malloc_header *mh;
 
 	mh = addr;
@@ -169,6 +195,7 @@ kern_free(void *addr)
 		kmsan_mark(mh, mh->mh_size, KMSAN_STATE_INITED);
 		kmem_intr_free(mh, mh->mh_size);
 	}
+#endif
 }
 
 void *
@@ -205,11 +232,13 @@ kern_realloc(void *curaddr, unsigned long newsize, int flags)
 	cursize = mh->mh_size - sizeof(struct malloc_header);
 #endif
 
+#ifndef RUMP_USE_LIBC_ALLOCATORS
 	/*
 	 * If we already actually have as much as they want, we're done.
 	 */
 	if (newsize <= cursize)
 		return curaddr;
+#endif
 
 	/*
 	 * Can't satisfy the allocation with the existing block.
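An aside on the scheme above: the kern_malloc()/kern_free() pair works because rumpuser_free() is handed the allocation size, which libc's free(3) would not need, so the size is stashed in a header prepended to every chunk. The stand-alone sketch below reproduces the same pattern in plain userland C; the demo_* names are hypothetical, and malloc(3)/free(3) stand in for the rumpuser hypercalls.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical stand-in for the in-kernel struct malloc_header. */
    struct demo_header {
            size_t dh_size;                 /* total size, header included */
    };

    /* Userland mirror of the RUMP_USE_LIBC_ALLOCATORS kern_malloc() path. */
    static void *
    demo_malloc(size_t reqsize, int zero)
    {
            struct demo_header *dh;

            reqsize += sizeof(struct demo_header);
            dh = malloc(reqsize);           /* ~ rumpuser_malloc(reqsize, 0, ...) */
            if (dh == NULL)
                    return NULL;
            if (zero)
                    memset(dh, 0, reqsize); /* ~ the M_ZERO case */
            dh->dh_size = reqsize;

            /* Hand out the memory just past the header. */
            return dh + 1;
    }

    /* Userland mirror of the matching kern_free() path. */
    static void
    demo_free(void *addr)
    {
            struct demo_header *dh = (struct demo_header *)addr - 1;

            /* rumpuser_free() is also passed dh->dh_size; free(3) is not. */
            free(dh);
    }

    int
    main(void)
    {
            char *p = demo_malloc(16, 1);

            if (p == NULL)
                    return 1;
            snprintf(p, 16, "hello");
            printf("%s\n", p);
            demo_free(p);
            return 0;
    }

The header is what makes a pointer-only free interface possible on top of a sized hypercall: the free path recovers the size with a single pointer subtraction, at the cost of sizeof(struct malloc_header) overhead per allocation.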
diff --git a/sys/kern/subr_kmem.c b/sys/kern/subr_kmem.c
index 6715fe0b2a9c..e8b1ed90b218 100644
--- a/sys/kern/subr_kmem.c
+++ b/sys/kern/subr_kmem.c
@@ -99,6 +99,10 @@ __KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.80 2020/05/14 17:01:34 maxv Exp $");
 
 #include
 
+#ifdef _RUMPKERNEL
+#include "rump_private.h"
+#endif
+
 struct kmem_cache_info {
 	size_t		kc_size;
 	const char *	kc_name;
@@ -186,6 +190,14 @@ CTASSERT(KM_NOSLEEP == PR_NOWAIT);
 void *
 kmem_intr_alloc(size_t requested_size, km_flag_t kmflags)
 {
+#ifdef RUMP_USE_LIBC_ALLOCATORS
+	void *v;
+
+	if (rumpuser_malloc(requested_size, 0, &v))
+		return NULL;
+
+	return v;
+#else
 #ifdef KASAN
 	const size_t origsize = requested_size;
 #endif
@@ -230,6 +242,7 @@ kmem_intr_alloc(size_t requested_size, km_flag_t kmflags)
 		return p;
 	}
 	return p;
+#endif
 }
 
 /*
@@ -253,6 +266,9 @@ kmem_intr_zalloc(size_t size, km_flag_t kmflags)
 void
 kmem_intr_free(void *p, size_t requested_size)
 {
+#ifdef RUMP_USE_LIBC_ALLOCATORS
+	rumpuser_free(p, requested_size);
+#else
 	size_t allocsz, index;
 	size_t size;
 	pool_cache_t pc;
@@ -286,6 +302,7 @@ kmem_intr_free(void *p, size_t requested_size)
 	LOCKDEBUG_MEM_CHECK(p, size);
 
 	pool_cache_put(pc, p);
+#endif
 }
 
 /* -------------------------------- Kmem API -------------------------------- */

diff --git a/sys/kern/subr_pool.c b/sys/kern/subr_pool.c
index baaebbfafe6c..dd187c382cce 100644
--- a/sys/kern/subr_pool.c
+++ b/sys/kern/subr_pool.c
@@ -62,6 +62,10 @@ __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.270 2020/06/07 09:45:19 maxv Exp $")
 
 #include
 
+#ifdef _RUMPKERNEL
+#include "rump_private.h"
+#endif
+
 /*
  * Pool resource management utility.
  *
@@ -1042,6 +1046,22 @@ pool_alloc_item_header(struct pool *pp, void *storage, int flags)
 void *
 pool_get(struct pool *pp, int flags)
 {
+#ifdef RUMP_USE_LIBC_ALLOCATORS
+	void *v;
+	pool_cache_t pc;
+
+	if (rumpuser_malloc(pp->pr_size, 0, &v))
+		return NULL;
+
+	pc = pp->pr_cache;
+
+	if (pc != NULL && __predict_false(pc_has_ctor(pc)) && (*pc->pc_ctor)(pc->pc_arg, v, flags) != 0) {
+		pool_put(&pc->pc_pool, v);
+		return NULL;
+	}
+
+	return v;
+#else
 	struct pool_item_header *ph;
 	void *v;
 
@@ -1215,6 +1235,7 @@ pool_get(struct pool *pp, int flags)
 	if (flags & PR_ZERO)
 		memset(v, 0, pp->pr_reqsize);
 	return v;
+#endif
 }
 
 /*
@@ -1223,6 +1244,14 @@
 static void
 pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
 {
+#ifdef RUMP_USE_LIBC_ALLOCATORS
+	pool_cache_t pc = pp->pr_cache;
+
+	if (pc != NULL && __predict_false(pc_has_dtor(pc)))
+		(*pc->pc_dtor)(pc->pc_arg, v);
+
+	rumpuser_free(v, pp->pr_size);
+#else
 	struct pool_item_header *ph;
 
 	KASSERT(mutex_owned(&pp->pr_lock));
@@ -1309,6 +1338,7 @@ pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
 		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
 		pp->pr_curpage = ph;
 	}
+#endif
 }
 
 void
@@ -2524,6 +2554,19 @@ pool_cache_get_slow(pool_cache_cpu_t *cc, int s, void **objectp,
 void *
 pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
 {
+#ifdef RUMP_USE_LIBC_ALLOCATORS
+	void *object;
+
+	object = pool_get(&pc->pc_pool, flags);
+
+	if (__predict_false(object == NULL))
+		return NULL;
+
+	if (__predict_false(pap != NULL))
+		*pap = POOL_VTOPHYS(object);
+
+	return object;
+#else
 	pool_cache_cpu_t *cc;
 	pcg_t *pcg;
 	void *object;
@@ -2600,8 +2643,10 @@ pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
 	 * constructor fails.
 	 */
 	return object;
+#endif
 }
 
+#ifndef RUMP_USE_LIBC_ALLOCATORS
 static bool __noinline
 pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object)
 {
@@ -2706,6 +2751,7 @@ pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object)
 
 	return false;
 }
+#endif
 
 /*
  * pool_cache_put{,_paddr}:
@@ -2716,6 +2762,9 @@ pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object)
 void
 pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
 {
+#ifdef RUMP_USE_LIBC_ALLOCATORS
+	pool_put(&pc->pc_pool, object);
+#else
 	pool_cache_cpu_t *cc;
 	pcg_t *pcg;
 	int s;
@@ -2768,6 +2817,7 @@ pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
 		if (!pool_cache_put_slow(cc, s, object))
 			break;
 	}
+#endif
 }
 
 /*

diff --git a/sys/rump/Makefile.rump b/sys/rump/Makefile.rump
index 25c75f966fc7..0169b98d3b22 100644
--- a/sys/rump/Makefile.rump
+++ b/sys/rump/Makefile.rump
@@ -93,6 +93,17 @@ CPPFLAGS+=	-DRUMP_USE_CTOR
 .endif
 .endif
 
+RUMP_ALLOCATORS?=	emulated
+.if ${RUMP_ALLOCATORS} == "emulated"
+# The current default: emulate the characteristics of the kernel allocators.
+.elif ${RUMP_ALLOCATORS} == "libc"
+# Wire the kernel allocators directly to the libc functions.
+# Useful for sanitizers.
+CPPFLAGS+=	-DRUMP_USE_LIBC_ALLOCATORS
+.else
+.error Unsupported allocator scheme: ${RUMP_ALLOCATORS}
+.endif
+
 .if defined(RUMP_CURLWP)
 .if ${RUMP_CURLWP} == "hypercall"
 CPPFLAGS+=	-DRUMP_CURLWP=RUMP_CURLWP_HYPERCALL
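A usage note on the Makefile hook: because Makefile.rump assigns the default with ?=, RUMP_ALLOCATORS can be overridden like any other make(1) variable. The lines below are a hypothetical example, assuming an otherwise standard NetBSD build setup; any value other than "emulated" or "libc" stops the build via the .error branch above.

    # persistently, in mk.conf:
    RUMP_ALLOCATORS=libc

    # or per build, on the command line when rebuilding the rump components:
    $ make RUMP_ALLOCATORS=libc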