From d9e4180598e8f2cf956901bc7d836c6cd43ea3f3 Mon Sep 17 00:00:00 2001 From: Taylor R Campbell Date: Wed, 4 Dec 2019 02:59:14 +0000 Subject: [PATCH 1/4] Allow equality in this assertion. This can happen if we lose the race mentioned in percpu_cpu_swap. --- sys/kern/subr_percpu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sys/kern/subr_percpu.c b/sys/kern/subr_percpu.c index d7a35ae6c6b2..eb1cecbbf898 100644 --- a/sys/kern/subr_percpu.c +++ b/sys/kern/subr_percpu.c @@ -149,7 +149,7 @@ percpu_cpu_enlarge(size_t size) where = xc_unicast(0, percpu_cpu_swap, ci, &pcc, ci); xc_wait(where); } - KASSERT(pcc.pcc_size < size); + KASSERT(pcc.pcc_size <= size); if (pcc.pcc_data != NULL) { kmem_free(pcc.pcc_data, pcc.pcc_size); } From d6ca859cf2cba45d5739a15c90bbca8d7a625206 Mon Sep 17 00:00:00 2001 From: Taylor R Campbell Date: Thu, 1 Aug 2019 15:46:19 +0000 Subject: [PATCH 2/4] WIP: rewrite entropy pool --- common/lib/libc/Makefile.inc | 1 + share/man/man9/rndsink.9 | 147 -- sys/compat/common/rndpseudo_50.c | 12 +- sys/compat/netbsd32/netbsd32_rndpseudo_50.c | 8 +- sys/compat/sys/rnd.h | 4 +- sys/dev/files.dev | 2 +- sys/dev/random.c | 469 ++++ sys/dev/rndpseudo.c | 578 ----- sys/dev/sysmon/sysmon_envsys.c | 4 +- sys/kern/files.kern | 4 +- sys/kern/init_main.c | 10 +- sys/kern/kern_entropy.c | 1983 +++++++++++++++++ sys/kern/kern_rndpool.c | 289 --- sys/kern/kern_rndq.c | 1725 -------------- sys/kern/kern_rndsink.c | 254 --- sys/kern/subr_autoconf.c | 5 +- sys/kern/subr_cprng.c | 669 ++---- sys/lib/libkern/Makefile.libkern | 3 + sys/lib/libkern/entpool.c | 750 +++++++ .../rnd_private.h => lib/libkern/entpool.h} | 79 +- sys/rump/dev/lib/librnd/Makefile | 2 +- sys/rump/librump/rumpkern/Makefile.rumpkern | 4 +- sys/rump/librump/rumpkern/emul.c | 2 - sys/rump/librump/rumpkern/rump.c | 5 +- sys/sys/compat_stub.h | 4 +- sys/sys/cprng.h | 12 +- sys/sys/{rndsink.h => entropy.h} | 45 +- sys/sys/rnd.h | 1 + sys/sys/rndpool.h | 36 +- sys/sys/rndsource.h | 108 +- 30 files changed, 3547 insertions(+), 3668 deletions(-) delete mode 100644 share/man/man9/rndsink.9 create mode 100644 sys/dev/random.c delete mode 100644 sys/dev/rndpseudo.c create mode 100644 sys/kern/kern_entropy.c delete mode 100644 sys/kern/kern_rndpool.c delete mode 100644 sys/kern/kern_rndq.c delete mode 100644 sys/kern/kern_rndsink.c create mode 100644 sys/lib/libkern/entpool.c rename sys/{dev/rnd_private.h => lib/libkern/entpool.h} (54%) rename sys/sys/{rndsink.h => entropy.h} (64%) diff --git a/common/lib/libc/Makefile.inc b/common/lib/libc/Makefile.inc index 64f8d75dcbf3..3dd5fd47ca6b 100644 --- a/common/lib/libc/Makefile.inc +++ b/common/lib/libc/Makefile.inc @@ -45,3 +45,4 @@ CPPFLAGS+=-I${COMMON_DIR}/quad -I${COMMON_DIR}/string .if defined(COMMON_ARCHSUBDIR) CPPFLAGS+=-I${COMMON_ARCHDIR}/string .endif +CPPFLAGS+=-I${COMMON_DIR}/hash/sha3 diff --git a/share/man/man9/rndsink.9 b/share/man/man9/rndsink.9 deleted file mode 100644 index 1ee7437a7842..000000000000 --- a/share/man/man9/rndsink.9 +++ /dev/null @@ -1,147 +0,0 @@ -.\" $NetBSD: rndsink.9,v 1.2 2013/06/24 04:21:20 riastradh Exp $ -.\" -.\" Copyright (c) 2013 The NetBSD Foundation, Inc. -.\" All rights reserved. -.\" -.\" This documentation is derived from text contributed to The NetBSD -.\" Foundation by Taylor R. Campbell. -.\" -.\" Redistribution and use in source and binary forms, with or without -.\" modification, are permitted provided that the following conditions -.\" are met: -.\" 1. 
Redistributions of source code must retain the above copyright -.\" notice, this list of conditions and the following disclaimer. -.\" 2. Redistributions in binary form must reproduce the above copyright -.\" notice, this list of conditions and the following disclaimer in the -.\" documentation and/or other materials provided with the distribution. -.\" -.\" THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS -.\" ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -.\" TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -.\" PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS -.\" BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -.\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -.\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -.\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -.\" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -.\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -.\" POSSIBILITY OF SUCH DAMAGE. -.\" -.Dd April 10, 2013 -.Dt RNDSINK 9 -.Os -.Sh NAME -.Nm rndsink , -.Nm rndsink_create , -.Nm rndsink_destroy , -.Nm rndsink_request , -.Nm rndsink_schedule , -.Nd functions to asynchronously request entropy from the system entropy pool -.Sh SYNOPSIS -.In sys/rndsink.h -.Ft struct rndsink * -.Fn rndsink_create "size_t bytes" "void (*callback)(void *, const void *, size_t)" "void *arg" -.Ft void -.Fn rndsink_destroy "struct rndsink *rndsink" -.Ft bool -.Fn rndsink_request "struct rndsink *rndsink" "void *buffer" "size_t bytes" -.Ft void -.Fn rndsink_schedule "struct rndsink *rndsink" -.Sh DESCRIPTION -The -.Nm -functions support asynchronous requests for entropy from the system -entropy pool. -Users must call -.Fn rndsink_create -to create an rndsink which they may then pass to -.Fn rndsink_request -to request data from the system entropy pool. -If full entropy is not available, the system will call a callback when -entropy is next available. -Users can schedule a callback without requesting data now using -.Fn rndsink_schedule . -When users no longer need an rndsink, they must pass it to -.Fn rndsink_destroy . -.Pp -This API provides direct access to the system entropy pool. -Most users should use the -.Xr cprng 9 -API instead, which interposes a cryptographic pseudorandom number -generator between the user and the entropy pool. -.Sh FUNCTIONS -.Bl -tag -width abcd -.It Fn rndsink_create bytes callback arg -Create an rndsink for requests of -.Fa bytes -bytes of entropy, which must be no more than -.Dv RNDSINK_MAX_BYTES . -When requested and enough entropy is available, the system will call -.Fa callback -with three arguments: -.Bl -item -offset indent -.It -.Fa arg , -an arbitrary user-supplied pointer; -.It -a pointer to a buffer containing the bytes of entropy; and -.It -the number of bytes in the buffer, which will always be -.Fa bytes . -.El -.Pp -The callback will be called in soft interrupt context. -.Pp -.Fn rndsink_create -may sleep to allocate memory. -.It Fn rndsink_destroy rndsink -Destroy an rndsink. -.Fn rndsink_destroy -may sleep to wait for pending callbacks to complete and to deallocate -memory. -.It Fn rndsink_request rndsink buffer bytes -Store -.Fa bytes -bytes derived from the system entropy pool in -.Fa buffer . -If the bytes have full entropy, return true. 
-Otherwise, schedule a callback as if with -.Fn rndsink_schedule -and return false. -In either case, -.Fn rndsink_request -will store data in -.Fa buffer . -The argument -.Fa bytes -must be the same as the argument to -.Fn rndsink_create -that was used to create -.Fa rndsink . -May be called at -.Dv IPL_VM -or lower. -The caller should use -.Xr explicit_memset 3 -to clear -.Fa buffer -once it has used the data stored there. -.It Fn rndsink_schedule rndsink -Schedule a callback when the system entropy pool has enough entropy. -If a callback is already scheduled, it remains scheduled. -May be called at -.Dv IPL_VM -or lower. -.El -.Sh CODE REFERENCES -The rndsink API is implemented in -.Pa sys/kern/kern_rndsink.c -and -.Pa sys/sys/rndsink.h . -.Sh SEE ALSO -.Xr explicit_memset 3 , -.Xr cprng 9 , -.Xr rnd 9 -.Sh HISTORY -The rndsink API first appeared in -.Nx 7.0 . diff --git a/sys/compat/common/rndpseudo_50.c b/sys/compat/common/rndpseudo_50.c index e5126742125c..0b409fd40464 100644 --- a/sys/compat/common/rndpseudo_50.c +++ b/sys/compat/common/rndpseudo_50.c @@ -37,11 +37,11 @@ __KERNEL_RCSID(0, "$NetBSD: rndpseudo_50.c,v 1.5 2019/09/26 01:28:27 christos Ex #endif #include +#include +#include #include - -#include #include -#include +#include #include #include @@ -66,7 +66,7 @@ rndsource_to_rndsource50(rndsource_t *r, rndsource50_t *r50) * It also handles the case of (COMPAT_50 && COMPAT_NETBSD32). */ int -compat_50_rnd_ioctl(struct file *fp, u_long cmd, void *addr) +compat_50_rnd_ioctl(u_long cmd, void *addr) { int ret = 0; @@ -84,7 +84,7 @@ compat_50_rnd_ioctl(struct file *fp, u_long cmd, void *addr) rstbuf.start = rst50->start; rstbuf.count = rst50->count; - ret = (fp->f_ops->fo_ioctl)(fp, RNDGETSRCNUM, &rstbuf); + ret = entropy_ioctl(RNDGETSRCNUM, &rstbuf); if (ret != 0) return ret; @@ -105,7 +105,7 @@ compat_50_rnd_ioctl(struct file *fp, u_long cmd, void *addr) strlcpy(rstnmbuf.name, rstnm50->name, sizeof(rstnmbuf.name)); - ret = (fp->f_ops->fo_ioctl)(fp, RNDGETSRCNAME, &rstnmbuf); + ret = entropy_ioctl(RNDGETSRCNAME, &rstnmbuf); if (ret != 0) return ret; diff --git a/sys/compat/netbsd32/netbsd32_rndpseudo_50.c b/sys/compat/netbsd32/netbsd32_rndpseudo_50.c index 1a306bffa0fd..e8edb51be29b 100644 --- a/sys/compat/netbsd32/netbsd32_rndpseudo_50.c +++ b/sys/compat/netbsd32/netbsd32_rndpseudo_50.c @@ -38,8 +38,8 @@ __KERNEL_RCSID(0, "$NetBSD: netbsd32_rndpseudo_50.c,v 1.4 2019/09/26 01:32:09 ch #endif #include +#include #include - #include #include @@ -70,7 +70,7 @@ rndsource_to_rndsource50_32(rndsource_t *r, rndsource50_32_t *r50_32) * It also handles the case of (COMPAT_50 && COMPAT_NETBSD32). 
*/ int -compat32_50_rnd_ioctl(struct file *fp, u_long cmd, void *addr) +compat32_50_rnd_ioctl(u_long cmd, void *addr) { int ret = 0; @@ -87,7 +87,7 @@ compat32_50_rnd_ioctl(struct file *fp, u_long cmd, void *addr) rstbuf.start = rst50_32->start; rstbuf.count = rst50_32->count; - ret = (fp->f_ops->fo_ioctl)(fp, RNDGETSRCNUM, &rstbuf); + ret = entropy_ioctl(RNDGETSRCNUM, &rstbuf); if (ret != 0) return ret; @@ -108,7 +108,7 @@ compat32_50_rnd_ioctl(struct file *fp, u_long cmd, void *addr) strlcpy(rstnmbuf.name, rstnm50_32->name, sizeof(rstnmbuf.name)); - ret = (fp->f_ops->fo_ioctl)(fp, RNDGETSRCNAME, &rstnmbuf); + ret = entropy_ioctl(RNDGETSRCNAME, &rstnmbuf); if (ret != 0) return ret; diff --git a/sys/compat/sys/rnd.h b/sys/compat/sys/rnd.h index 3aad6f597fa6..f46962a95a69 100644 --- a/sys/compat/sys/rnd.h +++ b/sys/compat/sys/rnd.h @@ -136,8 +136,8 @@ typedef struct { * Compatibility with NetBSD-5 ioctls. */ #ifdef _KERNEL -int compat_50_rnd_ioctl(struct file *, u_long, void *); -int compat32_50_rnd_ioctl(struct file *, u_long, void *); +int compat_50_rnd_ioctl(u_long, void *); +int compat32_50_rnd_ioctl(u_long, void *); #endif #define RNDGETSRCNUM50 _IOWR('R', 102, rndstat50_t) diff --git a/sys/dev/files.dev b/sys/dev/files.dev index 623d52c1e4d5..5c402de02273 100644 --- a/sys/dev/files.dev +++ b/sys/dev/files.dev @@ -21,7 +21,7 @@ file dev/md.c md file dev/mm.c kern # XXX file dev/nullcons_subr.c nullcons needs-flag file dev/radio.c radio needs-flag -file dev/rndpseudo.c rnd needs-flag +file dev/random.c rnd needs-flag file dev/sequencer.c sequencer needs-flag file dev/video.c video needs-flag file dev/vnd.c vnd diff --git a/sys/dev/random.c b/sys/dev/random.c new file mode 100644 index 000000000000..e1beb7d95b83 --- /dev/null +++ b/sys/dev/random.c @@ -0,0 +1,469 @@ +/* $NetBSD$ */ + +/*- + * Copyright (c) 2019 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Taylor R. Campbell. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +/* + * /dev/random, /dev/urandom -- stateless version + * + * For short reads from /dev/urandom, up to 256 bytes, read from a + * per-CPU NIST Hash_DRBG instance that is reseeded as soon as the + * system has enough entropy. + * + * For all other reads, instantiate a fresh NIST Hash_DRBG from + * the global entropy pool, and draw from it. + * + * Each read is independent; there is no per-open state. + * Concurrent reads from the same open run in parallel. + * + * Reading from /dev/random may block until entropy is available. + * Either device may return short reads if interrupted. + */ + +#include +__KERNEL_RCSID(0, "$NetBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "ioconf.h" + +static dev_type_open(random_open); +static dev_type_close(random_close); +static dev_type_ioctl(random_ioctl); +static dev_type_poll(random_poll); +static dev_type_kqfilter(random_kqfilter); +static dev_type_read(random_read); +static dev_type_write(random_write); + +const struct cdevsw rnd_cdevsw = { + .d_open = random_open, + .d_close = random_close, + .d_read = random_read, + .d_write = random_write, + .d_ioctl = random_ioctl, + .d_stop = nostop, + .d_tty = notty, + .d_poll = random_poll, + .d_mmap = nommap, + .d_kqfilter = random_kqfilter, + .d_discard = nodiscard, + .d_flag = D_OTHER|D_MPSAFE, +}; + +/* + * Event counters + * + * Must be careful with adding these because they can serve as + * side channels. + */ +static struct evcnt devrandom_open = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, + NULL, "devrandom", "open"); +static struct evcnt devurandom_open = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, + NULL, "devurandom", "open"); +EVCNT_ATTACH_STATIC(devrandom_open); +EVCNT_ATTACH_STATIC(devurandom_open); + +#define RANDOM_BUFSIZE 512 /* XXX pulled from arse */ +static pool_cache_t random_buf_pc __read_mostly; + +/* Entropy source for writes to /dev/random and /dev/urandom */ +static krndsource_t user_rndsource; + +void +rndattach(int num) +{ + + random_buf_pc = pool_cache_init(RANDOM_BUFSIZE, 0, 0, 0, + "randombuf", NULL, IPL_NONE, NULL, NULL, NULL); + rnd_attach_source(&user_rndsource, "/dev/random", RND_TYPE_UNKNOWN, + RND_FLAG_COLLECT_VALUE|RND_FLAG_ESTIMATE_VALUE); +} + +static int +random_open(dev_t dev, int flags, int fmt, struct lwp *l) +{ + + /* Validate minor and count statistics. */ + switch (minor(dev)) { + case RND_DEV_RANDOM: + devrandom_open.ev_count++; + return 0; + case RND_DEV_URANDOM: + devurandom_open.ev_count++; + return 0; + default: + return ENXIO; + } +} + +static int +random_close(dev_t dev, int flags, int fmt, struct lwp *l) +{ + + /* Success! */ + return 0; +} + +static int +random_ioctl(dev_t dev, unsigned long cmd, void *data, int flag, struct lwp *l) +{ + + /* + * No non-blocking/async options; otherwise defer to + * entropy_ioctl. + */ + switch (cmd) { + case FIONBIO: + case FIOASYNC: + return 0; + default: + return entropy_ioctl(cmd, data); + } +} + +static int +random_poll(dev_t dev, int events, struct lwp *l) +{ + + /* /dev/random may block; /dev/urandom is always ready. */ + switch (minor(dev)) { + case RND_DEV_RANDOM: + return entropy_poll(events); + case RND_DEV_URANDOM: + return events & (POLLIN|POLLRDNORM | POLLOUT|POLLWRNORM); + default: + return 0; + } +} + +static int +random_kqfilter(dev_t dev, struct knote *kn) +{ + + /* Validate the event filter. 
*/ + switch (kn->kn_filter) { + case EVFILT_READ: + case EVFILT_WRITE: + break; + default: + return EINVAL; + } + + /* /dev/random may block; /dev/urandom never does. */ + switch (minor(dev)) { + case RND_DEV_RANDOM: + if (kn->kn_filter == EVFILT_READ) + return entropy_kqfilter(kn); + /* FALLTHROUGH */ + case RND_DEV_URANDOM: + kn->kn_fop = &seltrue_filtops; + return 0; + default: + return ENXIO; + } +} + +/* + * random_read(dev, uio, flags) + * + * Generate data from a PRNG seeded from the entropy pool. + * + * - If /dev/random, block until we have full entropy, or fail + * with EWOULDBLOCK, and if `depleting' entropy, return at most + * the entropy pool's capacity at once. + * + * - If /dev/urandom, generate data from whatever is in the + * entropy pool now. + * + * On interrupt, return a short read, but not shorter than 256 + * bytes (actually, no shorter than RANDOM_BUFSIZE bytes, which is + * 512 for hysterical raisins). + */ +static int +random_read(dev_t dev, struct uio *uio, int flags) +{ + uint8_t seed[NIST_HASH_DRBG_SEEDLEN_BYTES] = {0}; + struct nist_hash_drbg drbg; + uint8_t *buf; + bool interruptible = false; + int error; + + switch (minor(dev)) { + case RND_DEV_URANDOM: + /* + * Short read: Just generate the output directly with + * per-CPU cprng_strong and be done with it. + */ + if (uio->uio_resid <= 256) { + uint8_t stackbuf[256]; + + /* Generate data into a temporary buffer. */ + cprng_strong(user_cprng, stackbuf, uio->uio_resid, 0); + + /* Transfer the data out. */ + error = uiomove(stackbuf, uio->uio_resid, uio); + + /* Zero the temporary buffer and return. */ + explicit_memset(stackbuf, 0, sizeof stackbuf); + return error; + } + + /* + * Long read: Generate a short seed with per-CPU + * cprng_strong for a Hash_DRBG instance to generate + * the output. + */ + cprng_strong(user_cprng, seed, sizeof seed, 0); + break; + case RND_DEV_RANDOM: { + /* + * Draw a short seed with entropy_extract -- and wait + * or fail if need be. + */ + int extractflags = ENTROPY_SIG; + + /* Translate !FNONBLOCK to ENTROPY_WAIT. */ + if (!ISSET(flags, FNONBLOCK)) + extractflags |= ENTROPY_WAIT; + + /* Query the entropy pool. */ + error = entropy_extract(seed, sizeof seed, extractflags); + if (error) + return error; + break; + } + default: + return ENXIO; + } + + /* Instantiate the DRBG. */ + if (nist_hash_drbg_instantiate(&drbg, seed, sizeof seed, NULL, 0, + NULL, 0)) + panic("nist_hash_drbg_instantiate"); + + /* Promptly zero the seed. */ + explicit_memset(seed, 0, sizeof seed); + + /* Get a buffer for transfers. */ + buf = pool_cache_get(random_buf_pc, PR_WAITOK); + + /* Generate data. Assume no error until failure. */ + error = 0; + while (uio->uio_resid) { + size_t n = uio->uio_resid; + + /* No more than one buffer's worth. */ + n = MIN(n, RANDOM_BUFSIZE); + + /* + * If we're `depleting' and this is /dev/random, clamp + * to the smaller of the entropy capacity or the seed. + */ + if (__predict_false(atomic_load_relaxed(&entropy_depletion)) && + minor(dev) == RND_DEV_RANDOM) { + n = MIN(n, ENTROPY_CAPACITY); + n = MIN(n, sizeof seed); + /* + * Guarantee never to return more than one + * buffer in this case to minimize bookkeeping. + */ + CTASSERT(ENTROPY_CAPACITY <= RANDOM_BUFSIZE); + CTASSERT(sizeof seed <= RANDOM_BUFSIZE); + } + + /* + * Allow interruption, but only after providing a + * minimum number of bytes. + */ + CTASSERT(RANDOM_BUFSIZE >= 256); + if (interruptible) { + /* Yield if requested. 
*/ + if (curcpu()->ci_schedstate.spc_flags & + SPCF_SHOULDYIELD) + preempt(); + + /* Check for interruption. */ + if (__predict_false(curlwp->l_flag & LW_PENDSIG) && + sigispending(curlwp, 0)) { + error = EINTR; /* XXX ERESTART? */ + break; + } + } + + /* + * Try to generate a block of data, but if we've hit + * the DRBG reseed interval, reseed. + */ + if (nist_hash_drbg_generate(&drbg, buf, n, NULL, 0)) { + /* + * Get a fresh seed without blocking -- we have + * already generated some output so it is not + * useful to block. This can fail only if the + * request is obscenely large, so it is OK for + * either /dev/random or /dev/urandom to fail: + * we make no promises about gigabyte-sized + * reads happening all at once. + */ + error = entropy_extract(seed, sizeof seed, 0); + if (error) + break; + + /* Reseed and try again. */ + if (nist_hash_drbg_reseed(&drbg, seed, sizeof seed, + NULL, 0)) + panic("nist_hash_drbg_reseed"); + + /* Promptly zero the seed. */ + explicit_memset(seed, 0, sizeof seed); + + /* If it fails now, that's a bug. */ + if (nist_hash_drbg_generate(&drbg, buf, n, NULL, 0)) + panic("nist_hash_drbg_generate"); + } + + /* Transfer n bytes out. */ + error = uiomove(buf, n, uio); + if (error) + break; + + /* + * If we're `depleting' and this is /dev/random, stop + * here, return what we have, and force the next read + * to reseed. Could grab more from the pool if + * possible without blocking, but that's more + * work. + */ + if (__predict_false(atomic_load_relaxed(&entropy_depletion)) && + minor(dev) == RND_DEV_RANDOM) { + error = 0; + break; + } + + /* + * We have generated one block of output, so it is + * reasonable to allow interruption after this point. + */ + interruptible = true; + } + + /* Zero the buffer and return it to the pool cache. */ + explicit_memset(buf, 0, RANDOM_BUFSIZE); + pool_cache_put(random_buf_pc, buf); + + return error; +} + +/* + * random_write(dev, uio, flags) + * + * Enter data from userland in uio into the entropy pool. + * + * Assume privileged users provide full entropy, and unprivileged + * users provide no entropy. If you have a nonuniform source of + * data with n bytes of min-entropy, hash it with an XOF like + * SHAKE128 into exactly n bytes first. + */ +static int +random_write(dev_t dev, struct uio *uio, int flags) +{ + kauth_cred_t cred; + uint8_t *buf; + bool privileged = false; + int error = 0; + + /* Get the caller's credentials. */ + cred = kauth_cred_get(); + + /* Verify user's authorization to affect the entropy pool. */ + error = kauth_authorize_device(cred, KAUTH_DEVICE_RND_ADDDATA, + NULL, NULL, NULL, NULL); + if (error) + return error; + + /* + * Check whether user is privileged. If so, assume user + * furnishes full-entropy data; if not, accept user's data but + * assume it has zero entropy when we do accounting. + */ + if (kauth_authorize_device(cred, KAUTH_DEVICE_RND_ADDDATA_ESTIMATE, + NULL, NULL, NULL, NULL) == 0) + privileged = true; + + /* Get a buffer for transfers. XXX Make this interruptible! */ + buf = pool_cache_get(random_buf_pc, PR_WAITOK); + + /* Consume data. */ + while (uio->uio_resid) { + size_t n = uio->uio_resid; + + /* No more than one buffer's worth in one step. */ + n = MIN(uio->uio_resid, RANDOM_BUFSIZE); + + /* Yield if requested. */ + if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD) + preempt(); + + /* Check for interruption. */ + if (__predict_false(curlwp->l_flag & LW_PENDSIG) && + sigispending(curlwp, 0)) { + error = EINTR; /* XXX ERESTART? 
*/ + break; + } + + /* Transfer n bytes in and enter them into the pool. */ + error = uiomove(buf, n, uio); + if (error) + break; + rnd_add_data(&user_rndsource, buf, n, privileged ? n*NBBY : 0); + } + + /* Zero the buffer and return it to the pool cache. */ + explicit_memset(buf, 0, RANDOM_BUFSIZE); + pool_cache_put(random_buf_pc, buf); + return error; +} diff --git a/sys/dev/rndpseudo.c b/sys/dev/rndpseudo.c deleted file mode 100644 index c93deeda9bd9..000000000000 --- a/sys/dev/rndpseudo.c +++ /dev/null @@ -1,578 +0,0 @@ -/* $NetBSD: rndpseudo.c,v 1.38 2019/09/02 20:09:30 riastradh Exp $ */ - -/*- - * Copyright (c) 1997-2013 The NetBSD Foundation, Inc. - * All rights reserved. - * - * This code is derived from software contributed to The NetBSD Foundation - * by Michael Graff , Thor Lancelot Simon, and - * Taylor R. Campbell. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS - * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -__KERNEL_RCSID(0, "$NetBSD: rndpseudo.c,v 1.38 2019/09/02 20:09:30 riastradh Exp $"); - -#if defined(_KERNEL_OPT) -#include "opt_compat_netbsd.h" -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "ioconf.h" - -#if defined(__HAVE_CPU_COUNTER) -#include -#endif - -#ifdef RND_DEBUG -#define DPRINTF(l,x) if (rnd_debug & (l)) printf x -#else -#define DPRINTF(l,x) -#endif - -/* - * list devices attached - */ -#if 0 -#define RND_VERBOSE -#endif - -/* - * The size of a temporary buffer for reading and writing entropy. - */ -#define RND_TEMP_BUFFER_SIZE 512 - -static pool_cache_t rnd_temp_buffer_cache __read_mostly; - -/* - * Per-open state -- a lazily initialized CPRNG. 
- */ -struct rnd_ctx { - struct cprng_strong *rc_cprng; - bool rc_hard; -}; - -static pool_cache_t rnd_ctx_cache __read_mostly; - -/* - * The per-CPU RNGs used for short requests - */ -static percpu_t *percpu_urandom_cprng __read_mostly; - - -dev_type_open(rndopen); - -const struct cdevsw rnd_cdevsw = { - .d_open = rndopen, - .d_close = noclose, - .d_read = noread, - .d_write = nowrite, - .d_ioctl = noioctl, - .d_stop = nostop, - .d_tty = notty, - .d_poll = nopoll, - .d_mmap = nommap, - .d_kqfilter = nokqfilter, - .d_discard = nodiscard, - .d_flag = D_OTHER | D_MPSAFE -}; - -static int rnd_read(struct file *, off_t *, struct uio *, kauth_cred_t, int); -static int rnd_write(struct file *, off_t *, struct uio *, kauth_cred_t, int); -static int rnd_ioctl(struct file *, u_long, void *); -static int rnd_poll(struct file *, int); -static int rnd_stat(struct file *, struct stat *); -static int rnd_close(struct file *); -static int rnd_kqfilter(struct file *, struct knote *); - -const struct fileops rnd_fileops = { - .fo_name = "rnd", - .fo_read = rnd_read, - .fo_write = rnd_write, - .fo_ioctl = rnd_ioctl, - .fo_fcntl = fnullop_fcntl, - .fo_poll = rnd_poll, - .fo_stat = rnd_stat, - .fo_close = rnd_close, - .fo_kqfilter = rnd_kqfilter, - .fo_restart = fnullop_restart -}; - -static struct evcnt rndpseudo_soft = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, - NULL, "rndpseudo", "open soft"); -static struct evcnt rndpseudo_hard = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, - NULL, "rndpseudo", "open hard"); -EVCNT_ATTACH_STATIC(rndpseudo_soft); -EVCNT_ATTACH_STATIC(rndpseudo_hard); - -/* - * Generate a 32-bit counter. - */ -static inline uint32_t -rndpseudo_counter(void) -{ - struct bintime bt; - uint32_t ret; - -#if defined(__HAVE_CPU_COUNTER) - if (cpu_hascounter()) - return (cpu_counter32()); -#endif - - binuptime(&bt); - ret = bt.sec; - ret ^= bt.sec >> 32; - ret ^= bt.frac; - ret ^= bt.frac >> 32; - - return ret; -} - -/* - * Used by ioconf.c to attach the rnd pseudo-device. - * `Attach' the random device. We use the timing of this event as - * another potential source of initial entropy. - */ -void -rndattach(int num) -{ - uint32_t c; - - /* Trap unwary players who don't call rnd_init() early. */ - KASSERT(rnd_ready); - - rnd_temp_buffer_cache = pool_cache_init(RND_TEMP_BUFFER_SIZE, 0, 0, 0, - "rndtemp", NULL, IPL_NONE, NULL, NULL, NULL); - rnd_ctx_cache = pool_cache_init(sizeof(struct rnd_ctx), 0, 0, 0, - "rndctx", NULL, IPL_NONE, NULL, NULL, NULL); - percpu_urandom_cprng = percpu_alloc(sizeof(struct cprng_strong *)); - - /* Mix in another counter. */ - c = rndpseudo_counter(); - rnd_add_data(NULL, &c, sizeof(c), 1); -} - -int -rndopen(dev_t dev, int flags, int fmt, struct lwp *l) -{ - bool hard; - struct file *fp; - int fd; - int error; - - switch (minor(dev)) { - case RND_DEV_URANDOM: - hard = false; - rndpseudo_soft.ev_count++; - break; - - case RND_DEV_RANDOM: - hard = true; - rndpseudo_hard.ev_count++; - break; - - default: - return ENXIO; - } - - error = fd_allocfile(&fp, &fd); - if (error) - return error; - - /* - * Allocate a context, but don't create a CPRNG yet -- do that - * lazily because it consumes entropy from the system entropy - * pool, which (currently) has the effect of depleting it and - * causing readers from /dev/random to block. If this is - * /dev/urandom and the process is about to send only short - * reads to it, then we will be using a per-CPU CPRNG anyway. 
- */ - struct rnd_ctx *const ctx = pool_cache_get(rnd_ctx_cache, PR_WAITOK); - ctx->rc_cprng = NULL; - ctx->rc_hard = hard; - - error = fd_clone(fp, fd, flags, &rnd_fileops, ctx); - KASSERT(error == EMOVEFD); - - return error; -} - -/* - * Fetch a /dev/u?random context's CPRNG, or create and save one if - * necessary. - */ -static struct cprng_strong * -rnd_ctx_cprng(struct rnd_ctx *ctx) -{ - struct cprng_strong *cprng, *tmp = NULL; - - /* Fast path: if someone has already allocated a CPRNG, use it. */ - cprng = ctx->rc_cprng; - if (__predict_true(cprng != NULL)) { - /* Make sure the CPU hasn't prefetched cprng's guts. */ - membar_consumer(); - goto out; - } - - /* Slow path: create a CPRNG. Allocate before taking locks. */ - char name[64]; - struct lwp *const l = curlwp; - (void)snprintf(name, sizeof(name), "%d %"PRIu64" %u", - (int)l->l_proc->p_pid, l->l_ncsw, l->l_cpticks); - const int flags = (ctx->rc_hard? (CPRNG_USE_CV | CPRNG_HARD) : - (CPRNG_INIT_ANY | CPRNG_REKEY_ANY)); - tmp = cprng_strong_create(name, IPL_NONE, flags); - - /* Publish cprng's guts before the pointer to them. */ - membar_producer(); - - /* Attempt to publish tmp, unless someone beat us. */ - cprng = atomic_cas_ptr(&ctx->rc_cprng, NULL, tmp); - if (__predict_false(cprng != NULL)) { - /* Make sure the CPU hasn't prefetched cprng's guts. */ - membar_consumer(); - goto out; - } - - /* Published. Commit tmp. */ - cprng = tmp; - tmp = NULL; - -out: if (tmp != NULL) - cprng_strong_destroy(tmp); - KASSERT(cprng != NULL); - return cprng; -} - -/* - * Fetch a per-CPU CPRNG, or create and save one if necessary. - */ -static struct cprng_strong * -rnd_percpu_cprng(void) -{ - struct cprng_strong **cprngp, *cprng, *tmp = NULL; - - /* Fast path: if there already is a CPRNG for this CPU, use it. */ - cprngp = percpu_getref(percpu_urandom_cprng); - cprng = *cprngp; - if (__predict_true(cprng != NULL)) - goto out; - percpu_putref(percpu_urandom_cprng); - - /* - * Slow path: create a CPRNG named by this CPU. - * - * XXX The CPU of the name may be different from the CPU to - * which it is assigned, because we need to choose a name and - * allocate a cprng while preemption is enabled. This could be - * fixed by changing the cprng_strong API (e.g., by adding a - * cprng_strong_setname or by separating allocation from - * initialization), but it's not clear that's worth the - * trouble. - */ - char name[32]; - (void)snprintf(name, sizeof(name), "urandom%u", cpu_index(curcpu())); - tmp = cprng_strong_create(name, IPL_NONE, - (CPRNG_INIT_ANY | CPRNG_REKEY_ANY)); - - /* Try again, but we may have been preempted and lost a race. */ - cprngp = percpu_getref(percpu_urandom_cprng); - cprng = *cprngp; - if (__predict_false(cprng != NULL)) - goto out; - - /* Commit the CPRNG we just created. */ - cprng = tmp; - tmp = NULL; - *cprngp = cprng; - -out: percpu_putref(percpu_urandom_cprng); - if (tmp != NULL) - cprng_strong_destroy(tmp); - KASSERT(cprng != NULL); - return cprng; -} - -static int -rnd_read(struct file *fp, off_t *offp, struct uio *uio, kauth_cred_t cred, - int flags) -{ - int error = 0; - - DPRINTF(RND_DEBUG_READ, - ("Random: Read of %zu requested, flags 0x%08x\n", - uio->uio_resid, flags)); - - if (uio->uio_resid == 0) - return 0; - - struct rnd_ctx *const ctx = fp->f_rndctx; - uint8_t *const buf = pool_cache_get(rnd_temp_buffer_cache, PR_WAITOK); - - /* - * Choose a CPRNG to use -- either the per-open CPRNG, if this - * is /dev/random or a long read, or the per-CPU one otherwise. 
- * - * XXX NIST_HASH_DRBG_MIN_SEEDLEN_BYTES is a detail of the cprng(9) - * implementation and as such should not be mentioned here. - */ - struct cprng_strong *const cprng = - ((ctx->rc_hard || - (uio->uio_resid > NIST_HASH_DRBG_MIN_SEEDLEN_BYTES))? - rnd_ctx_cprng(ctx) : rnd_percpu_cprng()); - - /* - * Generate the data in RND_TEMP_BUFFER_SIZE chunks. - */ - while (uio->uio_resid > 0) { - const size_t n_req = MIN(uio->uio_resid, RND_TEMP_BUFFER_SIZE); - - CTASSERT(RND_TEMP_BUFFER_SIZE <= CPRNG_MAX_LEN); - const size_t n_read = cprng_strong(cprng, buf, n_req, - ((ctx->rc_hard && ISSET(fp->f_flag, FNONBLOCK))? - FNONBLOCK : 0)); - - /* - * Equality will hold unless this is /dev/random, in - * which case we get only as many bytes as are left - * from the CPRNG's `information-theoretic strength' - * since the last rekey. - */ - KASSERT(n_read <= n_req); - KASSERT(ctx->rc_hard || (n_read == n_req)); - - error = uiomove(buf, n_read, uio); - if (error) - goto out; - - /* - * For /dev/urandom: Reads always succeed in full, no - * matter how many iterations that takes. (XXX But - * this means the computation can't be interrupted, - * wihch seems suboptimal.) - * - * For /dev/random, nonblocking: Reads succeed with as - * many bytes as a single request can return without - * blocking, or fail with EAGAIN if a request would - * block. (There is no sense in trying multiple - * requests because if the first one didn't fill the - * buffer, the second one would almost certainly - * block.) - * - * For /dev/random, blocking: Reads succeed with as - * many bytes as a single request -- which may block -- - * can return if uninterrupted, or fail with EINTR if - * the request is interrupted. - */ - KASSERT((0 < n_read) || ctx->rc_hard); - if (ctx->rc_hard) { - if (0 < n_read) - error = 0; - else if (ISSET(fp->f_flag, FNONBLOCK)) - error = EAGAIN; - else - error = EINTR; - goto out; - } - } - -out: pool_cache_put(rnd_temp_buffer_cache, buf); - return error; -} - -static int -rnd_write(struct file *fp, off_t *offp, struct uio *uio, - kauth_cred_t cred, int flags) -{ - uint8_t *bf; - int n, ret = 0, estimate_ok = 0, estimate = 0, added = 0; - - ret = kauth_authorize_device(cred, - KAUTH_DEVICE_RND_ADDDATA, NULL, NULL, NULL, NULL); - if (ret) { - return (ret); - } - estimate_ok = !kauth_authorize_device(cred, - KAUTH_DEVICE_RND_ADDDATA_ESTIMATE, NULL, NULL, NULL, NULL); - - DPRINTF(RND_DEBUG_WRITE, - ("Random: Write of %zu requested\n", uio->uio_resid)); - - if (uio->uio_resid == 0) - return (0); - ret = 0; - bf = pool_cache_get(rnd_temp_buffer_cache, PR_WAITOK); - while (uio->uio_resid > 0) { - /* - * Don't flood the pool. - */ - if (added > RND_POOLWORDS * sizeof(int)) { -#ifdef RND_VERBOSE - printf("rnd: added %d already, adding no more.\n", - added); -#endif - break; - } - n = uimin(RND_TEMP_BUFFER_SIZE, uio->uio_resid); - - ret = uiomove((void *)bf, n, uio); - if (ret != 0) - break; - - if (estimate_ok) { - /* - * Don't cause samples to be discarded by taking - * the pool's entropy estimate to the max. - */ - if (added > RND_POOLWORDS / 2) - estimate = 0; - else - estimate = n * NBBY / 2; -#ifdef RND_VERBOSE - printf("rnd: adding on write, %d bytes, estimate %d\n", - n, estimate); -#endif - } else { -#ifdef RND_VERBOSE - printf("rnd: kauth says no entropy.\n"); -#endif - } - - /* - * Mix in the bytes. 
- */ - rnd_add_data(NULL, bf, n, estimate); - - added += n; - DPRINTF(RND_DEBUG_WRITE, ("Random: Copied in %d bytes\n", n)); - } - pool_cache_put(rnd_temp_buffer_cache, bf); - return (ret); -} - -int -rnd_ioctl(struct file *fp, u_long cmd, void *addr) -{ - - switch (cmd) { - case FIONBIO: - case FIOASYNC: - return 0; - default: - return rnd_system_ioctl(fp, cmd, addr); - } -} - -static int -rnd_poll(struct file *fp, int events) -{ - struct rnd_ctx *const ctx = fp->f_rndctx; - int revents; - - /* - * We are always writable. - */ - revents = events & (POLLOUT | POLLWRNORM); - - /* - * Save some work if not checking for reads. - */ - if ((events & (POLLIN | POLLRDNORM)) == 0) - return revents; - - /* - * For /dev/random, ask the CPRNG, which may require creating - * one. For /dev/urandom, we're always readable. - */ - if (ctx->rc_hard) - revents |= cprng_strong_poll(rnd_ctx_cprng(ctx), events); - else - revents |= (events & (POLLIN | POLLRDNORM)); - - return revents; -} - -static int -rnd_stat(struct file *fp, struct stat *st) -{ - struct rnd_ctx *const ctx = fp->f_rndctx; - - /* XXX lock, if cprng allocated? why? */ - memset(st, 0, sizeof(*st)); - st->st_dev = makedev(cdevsw_lookup_major(&rnd_cdevsw), - (ctx->rc_hard? RND_DEV_RANDOM : RND_DEV_URANDOM)); - /* XXX leave atimespect, mtimespec, ctimespec = 0? */ - - st->st_uid = kauth_cred_geteuid(fp->f_cred); - st->st_gid = kauth_cred_getegid(fp->f_cred); - st->st_mode = S_IFCHR; - return 0; -} - -static int -rnd_close(struct file *fp) -{ - struct rnd_ctx *const ctx = fp->f_rndctx; - - if (ctx->rc_cprng != NULL) - cprng_strong_destroy(ctx->rc_cprng); - fp->f_rndctx = NULL; - pool_cache_put(rnd_ctx_cache, ctx); - - return 0; -} - -static int -rnd_kqfilter(struct file *fp, struct knote *kn) -{ - struct rnd_ctx *const ctx = fp->f_rndctx; - - return cprng_strong_kqfilter(rnd_ctx_cprng(ctx), kn); -} diff --git a/sys/dev/sysmon/sysmon_envsys.c b/sys/dev/sysmon/sysmon_envsys.c index 50071b62919a..8ea89b47329d 100644 --- a/sys/dev/sysmon/sysmon_envsys.c +++ b/sys/dev/sysmon/sysmon_envsys.c @@ -694,7 +694,7 @@ sysmon_envsys_register(struct sysmon_envsys *sme) sme_event_drv_t *this_evdrv; int nevent; int error = 0; - char rnd_name[sizeof(edata->rnd_src.name)]; + char rnd_name[sizeof(edata->rnd_src.rs_name)]; KASSERT(sme != NULL); KASSERT(sme->sme_name != NULL); @@ -1172,7 +1172,7 @@ sme_remove_userprops(void) prop_dictionary_t sdict; envsys_data_t *edata = NULL; char tmp[ENVSYS_DESCLEN]; - char rnd_name[sizeof(edata->rnd_src.name)]; + char rnd_name[sizeof(edata->rnd_src.rs_name)]; sysmon_envsys_lim_t lims; const struct sme_descr_entry *sdt_units; uint32_t props; diff --git a/sys/kern/files.kern b/sys/kern/files.kern index 4887d5d7d9bd..415114fe7a28 100644 --- a/sys/kern/files.kern +++ b/sys/kern/files.kern @@ -40,6 +40,7 @@ file kern/kern_cpu.c kern #file kern/kern_ctf.c kdtrace_hooks file kern/kern_descrip.c kern +file kern/kern_entropy.c kern file kern/kern_event.c kern file kern/kern_exec.c kern file kern/kern_exit.c kern @@ -68,9 +69,6 @@ file kern/kern_ras.c kern file kern/kern_rate.c kern file kern/kern_reboot.c kern file kern/kern_resource.c kern -file kern/kern_rndpool.c kern -file kern/kern_rndq.c kern -file kern/kern_rndsink.c kern file kern/kern_runq.c kern file kern/kern_rwlock.c kern file kern/kern_rwlock_obj.c kern diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c index cf93314864d9..89e6fe121c21 100644 --- a/sys/kern/init_main.c +++ b/sys/kern/init_main.c @@ -241,8 +241,6 @@ struct timespec boottime; /* time at system startup - will 
only follow s int start_init_exec; /* semaphore for start_init() */ -cprng_strong_t *kern_cprng; - static void check_console(struct lwp *l); static void start_init(void *); static void configure(void); @@ -388,8 +386,6 @@ main(void) */ rnd_init(); /* initialize entropy pool */ - cprng_init(); /* initialize cryptographic PRNG */ - /* Initialize process and pgrp structures. */ procinit(); lwpinit(); @@ -499,10 +495,6 @@ main(void) /* Initialize the disk wedge subsystem. */ dkwedge_init(); - /* Initialize the kernel strong PRNG. */ - kern_cprng = cprng_strong_create("kernel", IPL_VM, - CPRNG_INIT_ANY|CPRNG_REKEY_ANY); - /* Initialize pfil */ pfil_init(); @@ -525,6 +517,8 @@ main(void) /* Configure the system hardware. This will enable interrupts. */ configure(); + cprng_init(); /* initialize cryptographic PRNG */ + /* Once all CPUs are detected, initialize the per-CPU cprng_fast. */ cprng_fast_init(); diff --git a/sys/kern/kern_entropy.c b/sys/kern/kern_entropy.c new file mode 100644 index 000000000000..81e60d3b9d09 --- /dev/null +++ b/sys/kern/kern_entropy.c @@ -0,0 +1,1983 @@ +/* $NetBSD$ */ + +/*- + * Copyright (c) 2019 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Taylor R. Campbell. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Entropy subsystem + * + * * Each CPU maintains a per-CPU entropy pool so that gathering + * entropy is cheap and requires no interprocessor + * synchronization, except early at boot when we may be + * scrambling to gather entropy as soon as possible. + * + * - entropy_enter gathers entropy and never drops it on the + * floor, at the cost of sometimes having to do cryptography. + * + * - entropy_enter_intr gathers entropy or drops it on the + * floor, with low latency. Work to stir the pool or kick the + * housekeeping thread is scheduled in soft interrupts. + * + * * entropy_enter immediately enters into the global pool if it + * can transition to full entropy in one swell foop. 
Otherwise, + * it defers to a housekeeping thread that consolidates entropy, + * but only when the CPUs collectively have full entropy for the + * global pool, in order to mitigate iterative-guessing attacks + * on incremental partial entropy. + * + * * The entropy housekeeping thread continues to consolidate + * entropy even after we think we have full entropy, in case we + * are wrong, but is limited to one discretionary consolidation + * per minute, and only when new entropy is actually coming in, + * to avoid having a performance impact. + * + * * The entropy epoch is a number that changes when we have + * transitioned from having partial entropy to having full + * entropy, so that users can easily determine when to reseed. + * This also facilitates an operator explicitly causing + * everything to reseed by sysctl -w kern.entropy.consolidate=1, + * e.g. if they just flipped a coin 256 times and wrote `echo + * tthhhhhthh... > /dev/random'. (XXX No way to wait until the + * consolidation has completed, short of busy-waiting on the + * event counter.) + * + * * No entropy estimation based on the samples, which is a + * contradiction in terms and a potential source of side + * channels. It is the responsibility of the driver to study + * how predictable the physical source of input can ever be, and + * to furnish a lower bound on the amount of entropy coming in + * from it. + * + * * Entropy depletion is available if you're into that sort of + * thing, by setting sysctl kern.entropy.depletion=1; the logic + * to support it is very limited in scope to minimize the chance + * of bugs. + * + * Many parts of the kernel from the bootloader to driver attach + * routines during autoconf to normal userland calls to /dev/random + * need access to the entropy subsystem. The entropy subsystem is set + * up in three stages to enable this: + * + * 1. Before E->initialized. MD bootloader logic can call + * entropy_seed() and that's all. + * + * 2. After E->initialized, before E->fully_ready. All entropy + * subsystem APIs are available, but they use serialized access + * to a single global entropy pool, so may be slow. + * + * 3. After E->fully_ready. The entropy subsystem is ready to + * gather data into per-CPU pools with no interprocessor + * synchronization overhead except to gather entropy into the + * global pool when requested. + */ + +#include +__KERNEL_RCSID(0, "$NetBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* legacy kernel API */ +#include /* userland ioctl interface */ +#include /* kernel rndsource driver API */ +#include +#include +#include /* for boot seed checksum */ +#include +#include +#include +#include +#include + +#include + +#include + +#ifdef __HAVE_CPU_COUNTER +#include +#endif + +#define ENTROPY_MAXDEBT (NBBY*ENTROPY_CAPACITY) /* bits */ + +/* + * struct entropy_cpu + * + * Per-CPU entropy state. The pool is allocated separately + * because percpu(9) sometimes moves per-CPU objects around + * without zeroing them, which would lead to unwanted copies of + * sensitive secrets. The evcnt is allocated separately evcnt(9) + * assumes it stays put in memory. + */ +struct entropy_cpu { + struct evcnt *ec_softint_evcnt; + struct entpool *ec_pool; + unsigned ec_credit; + bool ec_locked; +}; + +/* + * struct rndsource_cpu + * + * Per-CPU rndsource state. 
+ */ +struct rndsource_cpu { + unsigned rc_nbits; /* bits of entropy added */ +}; + +/* + * entropy_global (a.k.a. E for short in this file) + * + * Global entropy state. + */ +struct { + kmutex_t lock; /* covers all global state */ + struct entpool pool; /* global pool */ + unsigned debt; /* needed globally */ + unsigned credit; /* pending in per-CPU pools */ + unsigned timestamp; /* time of last consolidation */ + unsigned epoch; /* changes when debt -> 0 */ + kcondvar_t cv; /* notifies state changes */ + struct selinfo selq; /* notifies debt -> 0 */ + LIST_HEAD(,krndsource) early_sources; /* list of entropy sources */ + LIST_HEAD(,krndsource) sources; /* list of entropy sources */ + rndsave_t *seed; /* seed from bootloader */ + bool initialized; /* true after entropy_init */ + bool fully_ready; /* true after entropy_init_late */ + bool requesting; /* busy requesting from sources */ + bool consolidate; /* kick thread to consolidate */ + bool boot_seeded; /* true if seeded from bootloader */ + bool ioctl_seeded; /* true if seeded by RNDADDDATA */ +} entropy_global __cacheline_aligned; + +#define E (&entropy_global) /* declutter */ + +/* Read-mostly globals */ +static struct percpu *entropy_percpu __read_mostly; /* struct entropy_cpu */ +static void *entropy_sih __read_mostly; /* softint handler */ +static struct lwp *entropy_lwp __read_mostly; /* housekeeping thread */ + +int rnd_initial_entropy __read_mostly; /* legacy */ + +/* + * Event counters + * + * Must be careful with adding these because they can serve as + * side channels. + */ +static struct evcnt entropy_discretionary_evcnt = + EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "discretionary"); +EVCNT_ATTACH_STATIC(entropy_discretionary_evcnt); +static struct evcnt entropy_immediate_evcnt = + EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "immediate"); +EVCNT_ATTACH_STATIC(entropy_immediate_evcnt); +static struct evcnt entropy_partial_evcnt = + EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "partial"); +EVCNT_ATTACH_STATIC(entropy_partial_evcnt); +static struct evcnt entropy_consolidate_evcnt = + EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "consolidate"); +EVCNT_ATTACH_STATIC(entropy_consolidate_evcnt); +static struct evcnt entropy_extract_intr_evcnt = + EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "extract intr"); +EVCNT_ATTACH_STATIC(entropy_extract_intr_evcnt); +static struct evcnt entropy_extract_fail_evcnt = + EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "extract fail"); +EVCNT_ATTACH_STATIC(entropy_extract_fail_evcnt); +static struct evcnt entropy_request_evcnt = + EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "request"); +EVCNT_ATTACH_STATIC(entropy_request_evcnt); +static struct evcnt entropy_deplete_evcnt = + EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "deplete"); +EVCNT_ATTACH_STATIC(entropy_deplete_evcnt); +static struct evcnt entropy_notify_evcnt = + EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "notify"); +EVCNT_ATTACH_STATIC(entropy_notify_evcnt); + +/* Defaults -- move to config(5) */ +#define ENTROPY_COLLECTION 1 +#define ENTROPY_DEPLETION 0 + +/* Sysctl knobs */ +bool entropy_collection = ENTROPY_COLLECTION; +bool entropy_depletion = ENTROPY_DEPLETION; /* Silly! 
*/ + +static const struct sysctlnode *entropy_sysctlroot; +static struct sysctllog *entropy_sysctllog; + +/* Forward declarations */ +static inline uint32_t entropy_timer(void); +static void entropy_init_cpu(void *, void *, struct cpu_info *); +static void entropy_fini_cpu(void *, void *, struct cpu_info *); +static void entropy_credit_cpu(struct entropy_cpu *); +static void entropy_enter(const void *, size_t, unsigned); +static bool entropy_enter_intr(const void *, size_t, unsigned); +static void entropy_softintr(void *); +static void entropy_thread(void *); +static void entropy_consolidate(void); +static void entropy_gather_xc(void *, void *); +static void entropy_notify(void); +static int sysctl_entropy_consolidate(SYSCTLFN_ARGS); +static void filt_entropy_read_detach(struct knote *); +static int filt_entropy_read_event(struct knote *, long); +static void entropy_request(size_t); +static void rnd_add_data_1(struct krndsource *, const void *, + uint32_t, uint32_t); +static void rndsource_to_user(struct krndsource *, rndsource_t *); +static void rndsource_to_user_est(struct krndsource *, + rndsource_est_t *); + +/* + * entropy_timer() + * + * Cycle counter, time counter, or anything that changes a wee bit + * unpredictably. + */ +static inline uint32_t +entropy_timer(void) +{ + struct bintime bt; + uint32_t v; + + /* If we have a CPU cycle counter, use the low 32 bits. */ +#ifdef __HAVE_CPU_COUNTER + if (__predict_true(cpu_hascounter())) + return cpu_counter32(); +#endif /* __HAVE_CPU_COUNTER */ + + /* If we're cold, tough. Can't binuptime while cold. */ + if (__predict_false(cold)) + return 0; + + /* Fold the 128 bits of of binuptime into 32 bits. */ + binuptime(&bt); + v = bt.frac; + v ^= bt.frac >> 32; + v ^= bt.sec; + v ^= bt.sec >> 32; + return v; +} + +/* + * entropy_init() + * + * Initialize the entropy subsystem. Panic on failure. + * + * Requires percpu(9) and sysctl(9) to be initialized. + */ +void +entropy_init(void) +{ + uint32_t extra[2]; + unsigned i = 0; + + /* Grab some cycle counts early at boot. */ + extra[i++] = entropy_timer(); + + /* Run the entropy pool cryptography self-test. */ + if (entpool_selftest() == -1) + panic("entropy pool crypto self-test failed"); + + /* Allocate the per-CPU state, to be initialized later. */ + entropy_percpu = percpu_alloc(sizeof(struct entropy_cpu)); + + /* Create the sysctl directory. */ + sysctl_createv(&entropy_sysctllog, 0, NULL, &entropy_sysctlroot, + CTLFLAG_PERMANENT, CTLTYPE_NODE, "entropy", + SYSCTL_DESCR("Entropy (random number sources) options"), + NULL, 0, NULL, 0, + CTL_KERN, CTL_CREATE, CTL_EOL); + + /* Create the sysctl knobs. */ + /* XXX These shouldn't be writable at securelevel>0. */ + sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, + CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "collection", + SYSCTL_DESCR("Automatically collect entropy from hardware"), + NULL, 0, &entropy_collection, 0, CTL_CREATE, CTL_EOL); + sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, + CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "depletion", + SYSCTL_DESCR("`Deplete' entropy pool when observed"), + NULL, 0, &entropy_depletion, 0, CTL_CREATE, CTL_EOL); + sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, + CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "consolidate", + SYSCTL_DESCR("Trigger entropy consolidation now"), + sysctl_entropy_consolidate, 0, NULL, 0, CTL_CREATE, CTL_EOL); + /* XXX These should maybe not be readable at securelevel>0. 
*/ + sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, + CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT, + "debt", SYSCTL_DESCR("Systemwide entropy debt"), + NULL, 0, &E->debt, 0, CTL_CREATE, CTL_EOL); + sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, + CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT, + "credit", SYSCTL_DESCR("Entropy credit pending on CPUs"), + NULL, 0, &E->credit, 0, CTL_CREATE, CTL_EOL); + sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, + CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT, + "epoch", SYSCTL_DESCR("Entropy epoch"), + NULL, 0, &E->epoch, 0, CTL_CREATE, CTL_EOL); + sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, + CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_BOOL, + "boot_seeded", SYSCTL_DESCR("Entropy seeded by bootloader"), + NULL, 0, &E->boot_seeded, 0, CTL_CREATE, CTL_EOL); + sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL, + CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_BOOL, + "ioctl_seeded", SYSCTL_DESCR("Entropy seeded by ioctl RNDADDDATA"), + NULL, 0, &E->ioctl_seeded, 0, CTL_CREATE, CTL_EOL); + + /* Initialize the global state. */ + mutex_init(&E->lock, MUTEX_DEFAULT, IPL_VM); + E->debt = ENTROPY_MAXDEBT; + E->epoch = 0; + cv_init(&E->cv, "entropy"); + selinit(&E->selq); + LIST_INIT(&E->sources); + + /* Initialize the global pool by zeroing it. */ + memset(&E->pool, 0, sizeof E->pool); + + /* We are now initialized. */ + E->initialized = true; + + /* If the bootloader already provided a seed, use it. */ + if (E->seed == NULL) { + printf("entropy: no seed preloaded by bootloader\n"); + } else { + rndsave_t *const seed = E->seed; + + printf("entropy: entering seed preloaded by bootloader\n"); + entropy_enter(seed->data, sizeof(seed->data), seed->entropy); + explicit_memset(seed, 0, sizeof(*seed)); + + /* + * If the bootloader claimed to provide any entropy, + * mark ourselves as seeded by the bootloader. + */ + if (seed->entropy > 0) + atomic_store_relaxed(&E->boot_seeded, true); + } + + /* Enter the boot cycle count to get started. */ + extra[i++] = entropy_timer(); + KASSERT(i == __arraycount(extra)); + entropy_enter(extra, sizeof extra, 0); + explicit_memset(extra, 0, sizeof extra); +} + +/* + * entropy_init_late() + * + * Late initialization. Panic on failure. + * + * Requires CPUs to have been detected and LWPs to have started. + */ +void +entropy_init_late(void) +{ + int error; + + /* Initialize the per-CPU state. */ + percpu_foreach(entropy_percpu, entropy_init_cpu, NULL); + + /* Establish the softint. Must happen after CPU detection. */ + entropy_sih = softint_establish(SOFTINT_CLOCK|SOFTINT_MPSAFE, + &entropy_softintr, NULL); + if (entropy_sih == NULL) + panic("unable to establish entropy softint"); + + /* + * Create the entropy housekeeping thread. Must happen after + * lwpinit. + */ + error = kthread_create(PRI_NONE, KTHREAD_MPSAFE|KTHREAD_TS, NULL, + entropy_thread, NULL, &entropy_lwp, "entbutler"); + if (error) + panic("unable to create entropy housekeeping thread: %d", + error); + + /* + * Wait until the per-CPU initialization has been established + * on all CPUs before proceeding to mark the entropy subsystem + * fully ready. + */ + xc_barrier(XC_HIGHPRI); + atomic_store_release(&E->fully_ready, true); +} + +/* + * entropy_init_cpu(ptr, cookie, ci) + * + * percpu_foreach callback to initialize per-CPU entropy pool. 
+
+/*
+ * entropy_init_cpu(ptr, cookie, ci)
+ *
+ *	percpu_foreach callback to initialize per-CPU entropy pool.
+ */
+static void
+entropy_init_cpu(void *ptr, void *cookie, struct cpu_info *ci)
+{
+	struct entropy_cpu *ec = ptr;
+
+	ec->ec_softint_evcnt = kmem_alloc(sizeof(*ec->ec_softint_evcnt),
+	    KM_SLEEP);
+	ec->ec_pool = kmem_zalloc(sizeof(*ec->ec_pool), KM_SLEEP);
+	ec->ec_credit = 0;
+	ec->ec_locked = false;
+
+	evcnt_attach_dynamic(ec->ec_softint_evcnt, EVCNT_TYPE_MISC, NULL,
+	    ci->ci_cpuname, "entropy softint");
+}
+
+/*
+ * entropy_fini_cpu(ptr, cookie, ci)
+ *
+ *	percpu_foreach callback to finalize per-CPU entropy pool.
+ */
+static void __unused /* XXX */
+entropy_fini_cpu(void *ptr, void *cookie, struct cpu_info *ci)
+{
+	struct entropy_cpu *ec = ptr;
+
+	/*
+	 * Zero any lingering data. Disclosure of the per-CPU pool
+	 * shouldn't retroactively affect the security of any keys
+	 * generated, because we erase whatever we have just drawn out
+	 * of any pool, but better safe than sorry.
+	 */
+	explicit_memset(ec->ec_pool, 0, sizeof(*ec->ec_pool));
+
+	evcnt_detach(ec->ec_softint_evcnt);
+
+	kmem_free(ec->ec_pool, sizeof(*ec->ec_pool));
+	kmem_free(ec->ec_softint_evcnt, sizeof(*ec->ec_softint_evcnt));
+}
+
+/*
+ * entropy_seed(seed)
+ *
+ *	Seed the entropy pool with the data in seed. Meant to be
+ *	called as early as possible by the bootloader; may be called
+ *	before or after entropy_init. If called before, caller is
+ *	responsible for serializing it. Must be called in thread or
+ *	soft interrupt context, not in hard interrupt context.
+ *
+ *	May overwrite seed in place.
+ */
+void
+entropy_seed(rndsave_t *seed)
+{
+	SHA1_CTX ctx;
+	uint8_t digest[SHA1_DIGEST_LENGTH];
+
+	/*
+	 * Verify the checksum. If the checksum fails, take the data
+	 * but ignore the entropy estimate -- the file may have been
+	 * incompletely written with garbage, which is harmless to add
+	 * but may not be as unpredictable as alleged.
+	 *
+	 * XXX There is a byte order dependency here...
+	 */
+	SHA1Init(&ctx);
+	SHA1Update(&ctx, (const void *)&seed->entropy, sizeof(seed->entropy));
+	SHA1Update(&ctx, seed->data, sizeof(seed->data));
+	SHA1Final(digest, &ctx);
+	CTASSERT(sizeof(seed->digest) == sizeof(digest));
+	if (!consttime_memequal(digest, seed->digest, sizeof(digest))) {
+		printf("entropy: invalid seed checksum\n");
+		seed->entropy = 0;
+	}
+	explicit_memset(&ctx, 0, sizeof ctx);
+	explicit_memset(digest, 0, sizeof digest);
+
+	/*
+	 * If the entropy subsystem is not yet initialized, just save
+	 * it to be entered in entropy_init.
+	 */
+	if (!E->initialized) {
+		if (E->seed) {
+			/* Not much to do if MD bootloader seeds twice! */
+			printf("entropy: double-seeded by bootloader\n");
+		} else {
+			printf("entropy: preloading seed from bootloader\n");
+			E->seed = seed;
+		}
+		return;
+	}
+
+	/*
+	 * Determine whether it has already been seeded. If it was
+	 * already seeded, we may be re-entering it (e.g., bootloader
+	 * vs some other mechanism). There's no harm in entering it
+	 * twice, but it contributes no additional entropy.
+	 */
+	mutex_enter(&E->lock);
+	if (E->seed) {
+		printf("entropy: re-entering seed from bootloader\n");
+		seed->entropy = 0;
+	} else {
+		printf("entropy: entering seed from bootloader\n");
+		E->seed = seed;
+	}
+	mutex_exit(&E->lock);
+
+	/* Enter it into the pool. */
+	entropy_enter(seed->data, sizeof(seed->data), seed->entropy);
+
+	/*
+	 * If the bootloader claimed to provide any entropy, mark
+	 * ourselves as seeded by the bootloader.  Do this before we
+	 * zero the seed below.
+	 */
+	if (seed->entropy > 0) {
+		mutex_enter(&E->lock);
+		KASSERT(!E->boot_seeded);
+		atomic_store_relaxed(&E->boot_seeded, true);
+		mutex_exit(&E->lock);
+	}
+
+	explicit_memset(seed, 0, sizeof(*seed));
+}
+
+/*
+ * entropy_bootrequest()
+ *
+ *	Request entropy from all sources at boot, once config is
+ *	complete and interrupts are running.
+ */
+void
+entropy_bootrequest(void)
+{
+
+	KASSERT(E->initialized);
+
+	/*
+	 * Simply request enough to satisfy the maximum entropy debt.
+	 * This is harmless overkill if the bootloader provided a seed.
+	 */
+	mutex_enter(&E->lock);
+	entropy_request(howmany(ENTROPY_MAXDEBT, NBBY));
+	mutex_exit(&E->lock);
+}
+
+/*
+ * entropy_epoch()
+ *
+ *	Returns the current entropy epoch. If this changes, you should
+ *	reseed. Guaranteed never to be zero after initial seeding.
+ *
+ *	Usage model:
+ *
+ *		struct foo {
+ *			struct crypto_prng prng;
+ *			unsigned epoch;
+ *		} *foo;
+ *
+ *		unsigned epoch = entropy_epoch();
+ *		if (__predict_false(epoch != foo->epoch)) {
+ *			uint8_t seed[32];
+ *			if (entropy_extract(seed, sizeof seed, 0) != 0)
+ *				warn("no entropy");
+ *			crypto_prng_reseed(&foo->prng, seed, sizeof seed);
+ *			foo->epoch = epoch;
+ *		}
+ */
+unsigned
+entropy_epoch(void)
+{
+	unsigned epoch;
+
+	KASSERT(E->initialized);
+
+	/*
+	 * Unsigned int, so no need for seqlock for an atomic read, but
+	 * make sure we read only once before whatever decision the
+	 * caller is about to make.
+	 */
+	epoch = atomic_load_relaxed(&E->epoch);
+	__insn_barrier();
+
+	return epoch;
+}
+
+/*
+ * entropy_credit_cpu(ec)
+ *
+ *	Credit the bits of entropy on the current CPU to the global
+ *	pool.
+ *
+ *	- If this would pay off the entropy debt, do so immediately.
+ *
+ *	- If this and whatever else is available on other CPUs would
+ *	  pay off the entropy debt, kick the consolidation thread.
+ *
+ *	- Otherwise, do as little as possible, except maybe consolidate
+ *	  entropy at most once a minute.
+ *
+ *	Caller must have exclusive access to ec. Will acquire and
+ *	release the global entropy pool lock.
+ */
+static void
+entropy_credit_cpu(struct entropy_cpu *ec)
+{
+	unsigned diff;
+
+	/*
+	 * If there's no entropy debt, and entropy has been
+	 * consolidated in the last minute, do nothing.
+	 */
+	if (__predict_true(atomic_load_relaxed(&E->debt) == 0) &&
+	    __predict_true(!atomic_load_relaxed(&entropy_depletion)) &&
+	    __predict_true((atomic_load_relaxed(&time_uptime) -
+		    atomic_load_relaxed(&E->timestamp))
+		<= 60))
+		return;
+
+	/* If there's nothing to credit, stop here. */
+	if (ec->ec_credit == 0)
+		return;
+
+	/* Acquire the lock to try paying off the entropy debt. */
+	mutex_enter(&E->lock);
+
+	if (E->debt != 0 && E->debt <= ec->ec_credit) {
+		/*
+		 * If there is a nonzero debt we can pay off now, do
+		 * it. This way we disseminate entropy promptly when
+		 * it becomes available.
+		 */
+		uint8_t buf[ENTROPY_CAPACITY];
+
+		/* Transfer from the local pool to the global pool. */
+		entpool_extract(ec->ec_pool, buf, sizeof buf);
+		entpool_enter(&E->pool, buf, sizeof buf);
+		ec->ec_credit = 0;
+		atomic_store_relaxed(&E->debt, 0);
+
+		/* Notify waiters that we now have full entropy. */
+		entropy_notify();
+		entropy_immediate_evcnt.ev_count++;
+	} else if (ec->ec_credit) {
+		/* Record how much we can credit to the global pool. */
+		diff = MIN(ec->ec_credit, ENTROPY_MAXDEBT - E->credit);
+		E->credit += diff;
+		ec->ec_credit -= diff;
+
+		/*
+		 * This should have made a difference, so there should
+		 * be entropy to credit globally.
+ */ + KASSERT(diff); + KASSERT(E->credit); + + if (E->debt <= E->credit) { + /* + * Debt can be paid off in full by drawing from + * the per-CPU entropy pools. Wake up the + * housekeeping thread. + * + * If the debt is already zero, this doesn't + * mean much, but it is the only time we ever + * gather additional entropy in case the + * accounting has been overly optimistic. This + * happens at most once a minute, so there's + * negligible performance cost. + * + * XXX This is not quite right because there's + * no timer, so the per-CPU pools could be full + * if they filled within the once-per-minute + * rate limit, and never be consolidated. + */ + E->consolidate = true; + cv_broadcast(&E->cv); + if (E->debt == 0) + entropy_discretionary_evcnt.ev_count++; + } else { + /* Can't pay it off. */ + entropy_partial_evcnt.ev_count++; + } + } + + /* Release the lock. */ + mutex_exit(&E->lock); +} + +/* + * entropy_enter_cold(buf, len, nbits) + * + * Do entropy bookkeeping globally, before we have established + * per-CPU pools. + */ +static void +entropy_enter_cold(const void *buf, size_t len, unsigned nbits) +{ + + mutex_enter(&E->lock); + entpool_enter(&E->pool, buf, len); + E->debt -= MIN(E->debt, nbits); + if (E->debt == 0) { + entropy_notify(); + entropy_immediate_evcnt.ev_count++; + } + mutex_exit(&E->lock); +} + +/* + * entropy_enter(buf, len, nbits) + * + * Enter len bytes of data from buf into the system's entropy + * pool, permuting the state as necessary when the internal buffer + * fills up. nbits is a lower bound on the number of bits of + * entropy in the process that led to this sample. + */ +static struct evcnt entropy_enter_evcnt = + EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", + "enter"); +EVCNT_ATTACH_STATIC(entropy_enter_evcnt); +static void +entropy_enter(const void *buf, size_t len, unsigned nbits) +{ + struct entropy_cpu *ec; + int s; + + KASSERT(E->initialized); + KASSERTMSG(!cpu_intr_p(), + "use entropy_enter_intr from interrupt context"); + + entropy_enter_evcnt.ev_count++; + + if (__predict_false(!atomic_load_relaxed(&E->fully_ready))) { + entropy_enter_cold(buf, len, nbits); + return; + } + + /* + * Acquire the per-CPU state, blocking soft interrupts and + * causing hard interrupts to drop samples on the floor. + */ + ec = percpu_getref(entropy_percpu); + s = splsoftclock(); + KASSERT(!ec->ec_locked); + ec->ec_locked = true; + __insn_barrier(); + + /* Enter into the per-CPU pool. */ + entpool_enter(ec->ec_pool, buf, len); + + /* Count up what we can credit. */ + ec->ec_credit += MIN(ENTROPY_MAXDEBT - ec->ec_credit, nbits); + + /* Credit to the global pool if appropriate. */ + entropy_credit_cpu(ec); + + /* Release the per-CPU state. */ + KASSERT(ec->ec_locked); + __insn_barrier(); + ec->ec_locked = false; + splx(s); + percpu_putref(entropy_percpu); +} + +/* + * entropy_enter_intr(buf, len, nbits) + * + * Enter up to len bytes of data from buf into the system's + * entropy pool. nbits is a lower bound on the number of bits of + * entropy in the process that led to this sample. If the sample + * could be entered completely, credit nbits of entropy; otherwise + * credit nothing. Schedule a softint to stir the entropy pool if + * needed. Return true if used fully, false if truncated at all. + * + * Using this in thread context will work, but you might as well + * use entropy_enter in that case. 
+ */ +static bool +entropy_enter_intr(const void *buf, size_t len, unsigned nbits) +{ + struct entropy_cpu *ec; + bool fullyused = false; + + KASSERT(E->initialized); + + /* If it's too early after boot, just use entropy_enter. */ + if (__predict_false(!atomic_load_relaxed(&E->fully_ready))) { + entropy_enter_cold(buf, len, nbits); + return true; + } + + /* + * Acquire the per-CPU state. If someone is in the middle of + * using it, drop the sample. Otherwise, take the lock so that + * higher-priority interrupts will drop their samples. + */ + ec = percpu_getref(entropy_percpu); + if (ec->ec_locked) + goto out0; + ec->ec_locked = true; + __insn_barrier(); + + /* + * Enter as much as we can into the per-CPU pool. If it was + * truncated, schedule a softint to stir the pool and stop. + */ + if (!entpool_enter_nostir(ec->ec_pool, buf, len)) { + softint_schedule(entropy_sih); + goto out1; + } + fullyused = true; + + /* If there remains entropy debt, try to pay it off. */ + if (__predict_false(atomic_load_relaxed(&E->debt) != 0 || + atomic_load_relaxed(&entropy_depletion)) && + nbits != 0) { + ec->ec_credit += MIN(ENTROPY_MAXDEBT - ec->ec_credit, nbits); + softint_schedule(entropy_sih); + } + +out1: /* Release the per-CPU state. */ + KASSERT(ec->ec_locked); + __insn_barrier(); + ec->ec_locked = false; +out0: percpu_putref(entropy_percpu); + + return fullyused; +} + +/* + * entropy_softintr(cookie) + * + * Soft interrupt handler for entering entropy. Takes care of + * stirring the local CPU's entropy pool if it filled up during + * hard interrupts, and promptly crediting entropy from the local + * CPU's entropy pool to the global entropy pool if needed. + */ +static void +entropy_softintr(void *cookie) +{ + struct entropy_cpu *ec; + + /* + * Acquire the per-CPU state. Other users can lock this only + * while soft interrupts are blocked. Cause hard interrupts to + * drop samples on the floor. + */ + ec = percpu_getref(entropy_percpu); + KASSERT(!ec->ec_locked); + ec->ec_locked = true; + __insn_barrier(); + + /* Count statistics. */ + ec->ec_softint_evcnt->ev_count++; + + /* Stir the pool if necessary. */ + entpool_stir(ec->ec_pool); + + /* Credit to the global pool if appropriate. */ + entropy_credit_cpu(ec); + + /* Release the per-CPU state. */ + KASSERT(ec->ec_locked); + __insn_barrier(); + ec->ec_locked = false; + percpu_putref(entropy_percpu); +} + +/* + * entropy_thread(cookie) + * + * Handle any asynchronous entropy housekeeping. + */ +static void +entropy_thread(void *cookie) +{ + bool consolidate; + + for (;;) { + /* Wait until someone wants to consolidate. */ + mutex_enter(&E->lock); + while (!E->consolidate) + cv_wait(&E->cv, &E->lock); + consolidate = E->consolidate; + E->consolidate = false; + mutex_exit(&E->lock); + + /* Do it. */ + if (consolidate) + entropy_consolidate(); + + /* Mitigate abuse. */ + kpause("entropy", false, hz, NULL); + } +} + +/* + * entropy_consolidate() + * + * Issue a cross-call to gather entropy on all CPUs, and update + * the global entropy debt. + */ +static void +entropy_consolidate(void) +{ + unsigned diff; + uint64_t ticket; + + /* Gather entropy on all CPUs. */ + ticket = xc_broadcast(0, &entropy_gather_xc, NULL, NULL); + xc_wait(ticket); + + /* Acquire the lock to update the debt and notify waiters. */ + mutex_enter(&E->lock); + + /* Note when we last consolidated, i.e. now. */ + atomic_store_relaxed(&E->timestamp, atomic_load_relaxed(&time_uptime)); + + /* Credit the entropy that was gathered, and notify if now full. 
*/ + diff = MIN(E->debt, E->credit); + atomic_store_relaxed(&E->debt, E->debt - diff); + E->credit -= diff; + if (E->debt == 0) + entropy_notify(); + + /* Count another consolidation now that it's done. */ + entropy_consolidate_evcnt.ev_count++; + + /* Release the lock. */ + mutex_exit(&E->lock); +} + +/* + * entropy_gather_xc(arg1, arg2) + * + * Extract output from the local CPU's input pool and enter it + * into the global pool. + */ +static void +entropy_gather_xc(void *arg1 __unused, void *arg2 __unused) +{ + struct entropy_cpu *ec; + uint8_t buf[ENTPOOL_CAPACITY]; + uint32_t extra[7]; + unsigned i = 0; + int s; + + /* Grab CPU number and cycle counter to mix extra into the pool. */ + extra[i++] = cpu_number(); + extra[i++] = entropy_timer(); + + /* + * Acquire the per-CPU state, blocking soft interrupts and + * discarding entropy in hard interrupts, so that we can + * extract from the per-CPU pool. + */ + ec = percpu_getref(entropy_percpu); + s = splsoftclock(); + KASSERT(!ec->ec_locked); + ec->ec_locked = true; + __insn_barrier(); + extra[i++] = entropy_timer(); + + /* Extract the data. */ + entpool_extract(ec->ec_pool, buf, sizeof buf); + extra[i++] = entropy_timer(); + + /* Release the per-CPU state. */ + KASSERT(ec->ec_locked); + __insn_barrier(); + ec->ec_locked = false; + splx(s); + percpu_putref(entropy_percpu); + extra[i++] = entropy_timer(); + + /* + * Copy over statistics, and enter the per-CPU extract and the + * extra timing into the global pool, under the global lock. + */ + mutex_enter(&E->lock); + extra[i++] = entropy_timer(); + entpool_enter(&E->pool, buf, sizeof buf); + explicit_memset(buf, 0, sizeof buf); + extra[i++] = entropy_timer(); + KASSERT(i == __arraycount(extra)); + entpool_enter(&E->pool, extra, sizeof extra); + explicit_memset(extra, 0, sizeof extra); + mutex_exit(&E->lock); +} + +/* + * entropy_notify() + * + * Caller just paid off the entropy debt. Advance the entropy + * epoch and notify waiters. + * + * Caller must hold the global entropy lock, and must have just + * changed the entropy debt from nonzero to zero. + */ +static void +entropy_notify(void) +{ + + KASSERT(mutex_owned(&E->lock)); + KASSERT(E->debt == 0); + + entropy_notify_evcnt.ev_count++; + + rnd_initial_entropy = 1; /* legacy */ + atomic_store_relaxed(&E->epoch, MAX(1, E->epoch + 1)); + cv_broadcast(&E->cv); + selnotify(&E->selq, POLLIN|POLLRDNORM, NOTE_SUBMIT); +} + +/* + * sysctl kern.entropy.consolidate + * + * Trigger entropy consolidation. Writable only by superuser. + */ +static int +sysctl_entropy_consolidate(SYSCTLFN_ARGS) +{ + struct sysctlnode node = *rnode; + int arg; + int error; + + node.sysctl_data = &arg; + error = sysctl_lookup(SYSCTLFN_CALL(&node)); + if (error || newp == NULL) + return error; + if (arg) { + mutex_enter(&E->lock); + E->consolidate = true; + cv_broadcast(&E->cv); + mutex_exit(&E->lock); + } + + return 0; +} + +/* + * entropy_extract(buf, len, flags) + * + * Extract len bytes from the global entropy pool into buf. + * + * Flags may have: + * + * ENTROPY_WAIT Wait for entropy if not available yet. + * ENTROPY_SIG Allow interruption by a signal during wait. + * + * Return zero on success, or error on failure: + * + * EWOULDBLOCK No entropy and ENTROPY_WAIT not set. + * EINTR/ERESTART No entropy, ENTROPY_SIG set, and interrupted. + * + * If ENTROPY_WAIT is set, allowed only in thread context. If + * ENTROPY_WAIT is not set, allowed up to IPL_VM. (XXX That's + * awfully high... Do we really need it in hard interrupts? 
This + * arises from use of cprng_strong(9).) + */ +int +entropy_extract(void *buf, size_t len, int flags) +{ + int error; + + if (ISSET(flags, ENTROPY_WAIT)) + ASSERT_SLEEPABLE(); + KASSERT(E->initialized); + KASSERT(atomic_load_relaxed(&E->fully_ready)); + + /* Acquire the global lock to get at the global pool. */ + mutex_enter(&E->lock); + + /* Count up request for entropy in interrupt context. */ + if (cpu_intr_p()) + entropy_extract_intr_evcnt.ev_count++; + + /* Wait until there is enough entropy in the system. */ + error = 0; + while (E->debt) { + /* Ask for more, synchronously if possible. */ + entropy_request(len); + + /* If we got enough, we're done. */ + if (E->debt == 0) { + KASSERT(error == 0); + break; + } + + /* Wait for some to come in, according to flags. */ + if (!ISSET(flags, ENTROPY_WAIT)) { + error = EWOULDBLOCK; + break; + } else if (ISSET(flags, ENTROPY_SIG)) { + error = cv_wait_sig(&E->cv, &E->lock); + if (error) + break; + } else { + cv_wait(&E->cv, &E->lock); + } + } + + /* + * What can we do if we fail to wait? It is tempting to use + * whatever we _do_ have in the entropy pool, so the caller + * gets _something_. But this is a bad idea. If we have only + * (say) 1 bit of entropy, then drawing anything from the pool + * likely tells the adversary which bit it was. Then even if + * we get another bit of entropy, the adversary can repeat, and + * after 256 bits, they learn not only what the current outputs + * are but what _all subsequent_ outputs will be -- `iterative + * guessing attacks'. + * + * In contrast, if we refuse to return anything from the pool + * until we have gathered 256 bits at a time, the adversary may + * be able to guess early outputs but will be unable to guess + * later ones. Neither situation is good, but choosing to + * return something from a partially filled pool is worse. + */ + if (error) { + entropy_extract_fail_evcnt.ev_count++; + memset(buf, 0, len); + goto out; + } + + /* Extract data from the pool, and `deplete' if we're doing that. */ + entpool_extract(&E->pool, buf, len); + if (__predict_false(atomic_load_relaxed(&entropy_depletion)) && + error == 0) { + unsigned cost = MIN(len, ENTROPY_CAPACITY)*NBBY; + + atomic_store_relaxed(&E->debt, + E->debt + MIN(ENTROPY_MAXDEBT - E->debt, cost)); + entropy_deplete_evcnt.ev_count++; + } + +out: /* Release the global lock and return the error. */ + mutex_exit(&E->lock); + return error; +} + +/* + * entropy_poll(events) + * + * Return the subset of events ready, and if it is not all of + * events, record curlwp as waiting for entropy. + */ +int +entropy_poll(int events) +{ + int revents = 0; + + KASSERT(E->initialized); + + /* Always ready for writing. */ + revents |= events & (POLLOUT|POLLWRNORM); + + /* Narrow it down to reads. */ + events &= POLLIN|POLLRDNORM; + if (events == 0) + return revents; + + /* + * If the debt is already zero and we're not depleting entropy, + * we are forever ready. + */ + if (__predict_true(atomic_load_relaxed(&E->debt) == 0) && + __predict_true(!atomic_load_relaxed(&entropy_depletion))) + return revents | events; + + /* + * Otherwise, check the debt under the lock. If it's zero, + * we're ready; if not, add ourselves to the queue. + */ + mutex_enter(&E->lock); + if (E->debt == 0) + revents |= events; + else + selrecord(curlwp, &E->selq); + mutex_exit(&E->lock); + + return revents; +} + +/* + * filt_entropy_read_detach(kn) + * + * struct filterops::f_detach callback for entropy read events: + * remove kn from the list of waiters. 
+ */ +static void +filt_entropy_read_detach(struct knote *kn) +{ + + mutex_enter(&E->lock); + SLIST_REMOVE(&E->selq.sel_klist, kn, knote, kn_selnext); + mutex_exit(&E->lock); +} + +/* + * filt_entropy_read_event(kn, hint) + * + * struct filterops::f_event callback for entropy read events: + * poll for entropy. Caller must hold the global entropy lock if + * hint is NOTE_SUBMIT, and must not if hint is not NOTE_SUBMIT. + */ +static int +filt_entropy_read_event(struct knote *kn, long hint) +{ + int ret; + + /* Acquire the lock, if caller is outside entropy subsystem. */ + if (hint == NOTE_SUBMIT) + KASSERT(mutex_owned(&E->lock)); + else + mutex_enter(&E->lock); + + /* + * If there's entropy debt, can't read anything; if not, can + * read arbitrarily much. + */ + if (E->debt != 0) { + ret = 0; + } else { + if (atomic_load_relaxed(&entropy_depletion)) + kn->kn_data = ENTROPY_MAXDEBT; + else + kn->kn_data = MIN(INT64_MAX, SSIZE_MAX); + ret = 1; + } + + /* Release the lock, if caller is outside entropy subsystem. */ + if (hint == NOTE_SUBMIT) + KASSERT(mutex_owned(&E->lock)); + else + mutex_exit(&E->lock); + + return ret; +} + +static const struct filterops entropy_read_filtops = { + .f_isfd = 1, /* XXX Makes sense only for /dev/u?random. */ + .f_attach = NULL, + .f_detach = filt_entropy_read_detach, + .f_event = filt_entropy_read_event, +}; + +/* + * entropy_kqfilter(kn) + * + * Register kn to receive entropy event notifications. May be + * EVFILT_READ or EVFILT_WRITE; anything else yields EINVAL. + */ +int +entropy_kqfilter(struct knote *kn) +{ + + KASSERT(E->initialized); + + switch (kn->kn_filter) { + case EVFILT_READ: + /* Enter into the global select queue. */ + mutex_enter(&E->lock); + kn->kn_fop = &entropy_read_filtops; + SLIST_INSERT_HEAD(&E->selq.sel_klist, kn, kn_selnext); + mutex_exit(&E->lock); + return 0; + case EVFILT_WRITE: + /* Can always dump entropy into the system. */ + kn->kn_fop = &seltrue_filtops; + return 0; + default: + return EINVAL; + } +} + +/* + * Entropy sources. Usage model: + * + * 1. Allocate and zero a struct krndsource. + * 2. Optionally, set a callback with rndsource_setcb. + * 3. Attach it with rnd_attach_source. + * 4. Use rnd_add_data with it. + * 5. Detach it with rnd_detach_source. + * 6. Free the struct krndsource. + */ + +/* + * rndsource_setcb(rs, request, requestarg) + * + * Set the request callback for the entropy source rs, if it can + * provide entropy on demand. Must be done before + * rnd_attach_source. + */ +void +rndsource_setcb(struct krndsource *rs, void (*request)(size_t, void *), + void *requestarg) +{ + + rs->rs_request = request; + rs->rs_requestarg = requestarg; +} + +/* + * rnd_attach_source(rs, name, type, flags) + * + * Attach the entropy source rs. Must be done after + * rndsource_setcb, if any, and before any calls to rnd_add_data. + */ +void +rnd_attach_source(struct krndsource *rs, const char *name, int type, int flags) +{ + uint32_t extra[5]; + unsigned i = 0; + + KASSERT(E->initialized); + + /* Grab CPU number and cycle counter to mix extra into the pool. */ + extra[i++] = cpu_number(); + extra[i++] = entropy_timer(); + + /* + * Apply some standard flags: + * + * - We do not bother with network devices by default, for + * hysterical raisins (perhaps: because it is often the case + * that an adversary can influence network packet timings). + */ + switch (type) { + case RND_TYPE_NET: + flags |= RND_FLAG_NO_COLLECT; + break; + } + + /* Sanity-check the callback if RND_FLAG_HASCB is set. 
*/ + KASSERT(!ISSET(flags, RND_FLAG_HASCB) || rs->rs_request != NULL); + + /* Initialize the random source. */ + memset(rs->rs_name, 0, sizeof(rs->rs_name)); /* paranoia */ + strlcpy(rs->rs_name, name, sizeof(rs->rs_name)); + rs->rs_type = type; + rs->rs_flags = flags; + rs->rs_percpu = percpu_alloc(sizeof(struct rndsource_cpu)); + extra[i++] = entropy_timer(); + + /* Wire it into the global list of random sources. */ + mutex_enter(&E->lock); + LIST_INSERT_HEAD(&E->sources, rs, rs_list); + mutex_exit(&E->lock); + extra[i++] = entropy_timer(); + + /* Request that it provide entropy ASAP, if we can. */ + if (ISSET(flags, RND_FLAG_HASCB)) + (*rs->rs_request)(ENTROPY_CAPACITY, rs->rs_requestarg); + extra[i++] = entropy_timer(); + + /* Mix the extra into the pool. */ + KASSERT(i == __arraycount(extra)); + entropy_enter(extra, sizeof extra, 0); +} + +/* + * rnd_detach_source(rs) + * + * Detach the entropy source rs. May sleep waiting for users to + * drain. Further use is not allowed. + */ +void +rnd_detach_source(struct krndsource *rs) +{ + + KASSERT(E->initialized); + + /* We may wait for other users drain. */ + ASSERT_SLEEPABLE(); + + /* Remove it from the list and wait for entropy_request. */ + mutex_enter(&E->lock); + LIST_REMOVE(rs, rs_list); + while (E->requesting) + cv_wait(&E->cv, &E->lock); + mutex_exit(&E->lock); +} + +/* + * entropy_request(n) + * + * Request n bytes of entropy from all sources in the system. OK + * if we overdo it. Caller must hold the global entropy lock; + * will release and re-acquire it. + */ +static void +entropy_request(size_t nbytes) +{ + struct krndsource *rs, *next; + + KASSERT(mutex_owned(&E->lock)); + + /* + * If there is a request in progress, let it proceed. + * Otherwise, note that a request is in progress to avoid + * reentry and to block rnd_detach_source until we're done. + */ + if (E->requesting) + return; + E->requesting = true; + entropy_request_evcnt.ev_count++; + + /* Clamp to the maximum reasonable request. */ + nbytes = MIN(nbytes, ENTROPY_CAPACITY); + + /* Walk the list of sources. */ + LIST_FOREACH_SAFE(rs, &E->sources, rs_list, next) { + /* Skip sources without callbacks. */ + if (!ISSET(rs->rs_flags, RND_FLAG_HASCB)) + continue; + + /* Drop the lock while we call the callback. */ + mutex_exit(&E->lock); + (*rs->rs_request)(nbytes, rs->rs_requestarg); + mutex_enter(&E->lock); + } + + /* Notify rnd_detach_source that the request is done. */ + E->requesting = false; + cv_broadcast(&E->cv); +} + +/* + * rnd_add_uint32(rs, value) + * + * Enter 32 bits of data from an entropy source into the pool. + * + * If rs is NULL, may not be called from interrupt context. + * + * If rs is non-NULL, may be called from any context. May drop + * data if called from interrupt context. + */ +void +rnd_add_uint32(struct krndsource *rs, uint32_t value) +{ + + KASSERT(E->initialized); + rnd_add_data(rs, &value, sizeof value, 0); +} + +/* + * rnd_add_data(rs, buf, len, entropybits) + * + * Enter data from an entropy source into the pool, with a + * driver's estimate of how much entropy the physical source of + * the data has. If RND_FLAG_NO_ESTIMATE, we ignore the driver's + * estimate and treat it as zero. + * + * If rs is NULL, may not be called from interrupt context. + * + * If rs is non-NULL, may be called from any context. May drop + * data if called from interrupt context. 
+ */ +void +rnd_add_data(struct krndsource *rs, const void *buf, uint32_t len, + uint32_t entropybits) +{ + uint32_t extra[3]; + unsigned i = 0; + uint32_t flags; + + KASSERT(E->initialized); + + /* If there's no rndsource, just enter the data and time now. */ + if (rs == NULL) { + extra[i++] = cpu_number(); + extra[i++] = entropy_timer(); + entropy_enter(buf, len, entropybits); + extra[i++] = entropy_timer(); + KASSERT(i == __arraycount(extra)); + entropy_enter(extra, sizeof extra, 0); + return; + } + + /* Load a snapshot of the flags. Ioctl may change them under us. */ + flags = atomic_load_relaxed(&rs->rs_flags); + + /* + * Skip if: + * - we're not collecting entropy, or + * - the operator doesn't want to collect entropy from this, or + * - neither data nor timings are being collected from this. + */ + if (!atomic_load_relaxed(&entropy_collection) || + ISSET(flags, RND_FLAG_NO_COLLECT) || + !ISSET(flags, RND_FLAG_COLLECT_VALUE|RND_FLAG_COLLECT_TIME)) + return; + + /* If asked, ignore the estimate. */ + if (ISSET(flags, RND_FLAG_NO_ESTIMATE)) + entropybits = 0; + + /* Grab CPU number and cycle counter to mix extra into the pool. */ + extra[i++] = cpu_number(); + extra[i++] = entropy_timer(); + + /* If we are collecting data, enter them. */ + if (ISSET(flags, RND_FLAG_COLLECT_VALUE)) + rnd_add_data_1(rs, buf, len, entropybits); + extra[i++] = entropy_timer(); + + /* If we are collecting timings, enter them. */ + KASSERT(i == __arraycount(extra)); + if (ISSET(flags, RND_FLAG_COLLECT_TIME)) + rnd_add_data_1(rs, extra, sizeof extra, 0); +} + +/* + * rnd_add_data_1(rs, buf, len, entropybits) + * + * Internal subroutine to either call entropy_enter_intr, if we're + * in interrupt context, or entropy_enter if not, and credit the + * entropy. + */ +static void +rnd_add_data_1(struct krndsource *rs, const void *buf, uint32_t len, + uint32_t entropybits) +{ + bool fullyused; + + /* + * If we're in interrupt context, use entropy_enter_intr and + * take note of whether it consumed the full sample; if not, + * use entropy_enter, which always consumes the full sample. + */ + if (cpu_intr_p()) { + fullyused = entropy_enter_intr(buf, len, entropybits); + } else { + entropy_enter(buf, len, entropybits); + fullyused = true; + } + + /* + * If we used the full sample, note how many bits were + * contributed from this source. + */ + if (fullyused) { + /* + * XXX Count the number of bits we physically enter for + * now; we really want to split this up between bits of + * _data_ and bits of _entropy_ for operator + * diagnostics. + */ + entropybits = MIN(len, UINT32_MAX/NBBY)*NBBY; + if (!atomic_load_relaxed(&E->fully_ready)) { + mutex_enter(&E->lock); + rs->rs_nbits_early += + MIN(UINT_MAX - rs->rs_nbits_early, entropybits); + mutex_exit(&E->lock); + } else { + struct rndsource_cpu *rc = + percpu_getref(rs->rs_percpu); + + atomic_store_relaxed(&rc->rc_nbits, + (rc->rc_nbits + + MIN(UINT_MAX - rc->rc_nbits, entropybits))); + percpu_putref(rs->rs_percpu); + } + } +} + +/* + * rnd_add_data_sync(rs, buf, len, entropybits) + * + * Same as rnd_add_data. Originally used in rndsource callbacks, + * to break an unnecessary cycle; no longer really needed. 
+ */ +void +rnd_add_data_sync(struct krndsource *rs, const void *buf, uint32_t len, + uint32_t entropybits) +{ + + KASSERT(E->initialized); + rnd_add_data(rs, buf, len, entropybits); +} + +static void +rndsource_countbits(void *ptr, void *cookie, struct cpu_info *ci) +{ + struct rndsource_cpu *rc = ptr; + unsigned *nbitsp = cookie; + + *nbitsp += MIN(UINT_MAX - *nbitsp, atomic_load_relaxed(&rc->rc_nbits)); +} + +/* + * rndsource_to_user(rs, urs) + * + * Copy a description of rs out to urs for userland. + */ +static void +rndsource_to_user(struct krndsource *rs, rndsource_t *urs) +{ + unsigned nbits = 0; + + KASSERT(mutex_owned(&E->lock)); + + /* Count the bits on all CPUs -- approximate answers OK. */ + percpu_foreach(rs->rs_percpu, rndsource_countbits, &nbits); + nbits += MIN(UINT_MAX - nbits, rs->rs_nbits_early); + + /* Avoid kernel memory disclosure. */ + memset(urs, 0, sizeof(*urs)); + + CTASSERT(sizeof(urs->name) == sizeof(rs->rs_name)); + strlcpy(urs->name, rs->rs_name, sizeof(urs->name)); + urs->total = nbits; + urs->type = rs->rs_type; + urs->flags = atomic_load_relaxed(&rs->rs_flags); +} + +/* + * rndsource_to_user_est(rs, urse) + * + * Copy a description of rs and estimation statistics out to urse + * for userland. + */ +static void +rndsource_to_user_est(struct krndsource *rs, rndsource_est_t *urse) +{ + + KASSERT(mutex_owned(&E->lock)); + + /* Avoid kernel memory disclosure. */ + memset(urse, 0, sizeof(*urse)); + + /* Copy out the rndsource description. */ + rndsource_to_user(rs, &urse->rt); + + /* Zero out the statistics because we don't do estimation. */ + urse->dt_samples = 0; + urse->dt_total = 0; + urse->dv_samples = 0; + urse->dv_total = 0; +} + +/* + * entropy_ioctl(cmd, data) + * + * Handle various /dev/random ioctl queries. + */ +int +entropy_ioctl(unsigned long cmd, void *data) +{ + struct krndsource *rs; + bool privileged; + int error; + + KASSERT(E->initialized); + + /* Verify user's authorization to perform the ioctl. */ + switch (cmd) { + case RNDGETENTCNT: + case RNDGETPOOLSTAT: + case RNDGETSRCNUM: + case RNDGETSRCNAME: + case RNDGETESTNUM: + case RNDGETESTNAME: + error = kauth_authorize_device(curlwp->l_cred, + KAUTH_DEVICE_RND_GETPRIV, NULL, NULL, NULL, NULL); + break; + case RNDCTL: + error = kauth_authorize_device(curlwp->l_cred, + KAUTH_DEVICE_RND_SETPRIV, NULL, NULL, NULL, NULL); + break; + case RNDADDDATA: + error = kauth_authorize_device(curlwp->l_cred, + KAUTH_DEVICE_RND_ADDDATA, NULL, NULL, NULL, NULL); + /* + * Ascertain whether the user's inputs should be + * contributed to the entropy debt or not. + */ + if (kauth_authorize_device(curlwp->l_cred, + KAUTH_DEVICE_RND_ADDDATA_ESTIMATE, + NULL, NULL, NULL, NULL) == 0) + privileged = true; + break; + default: + MODULE_HOOK_CALL(rnd_ioctl_50_hook, (cmd, data), + enosys(), error); +#if defined(_LP64) + if (error == ENOSYS) + MODULE_HOOK_CALL(rnd_ioctl32_50_hook, (cmd, data), + enosys(), error); +#endif + if (error == ENOSYS) + error = ENOTTY; + break; + } + + /* If anything went wrong with authorization, stop here. */ + if (error) + return error; + + /* Disptach on the command. */ + switch (cmd) { + case RNDGETENTCNT: { /* Get current entropy count in bits. */ + uint32_t *countp = data; + + mutex_enter(&E->lock); + *countp = ENTROPY_MAXDEBT - E->debt; + mutex_exit(&E->lock); + + break; + } + case RNDGETPOOLSTAT: { /* Get entropy pool statistics. 
*/ + rndpoolstat_t *pstat = data; + + mutex_enter(&E->lock); + + /* parameters */ + pstat->poolsize = ENTROPY_SIZE/sizeof(uint32_t); /* words */ + pstat->threshold = ENTROPY_CAPACITY*1; /* bytes */ + pstat->maxentropy = ENTROPY_CAPACITY*NBBY; /* bits */ + + /* state */ + pstat->added = 0; /* XXX total entropy_enter count */ + pstat->curentropy = ENTROPY_MAXDEBT - E->debt; + pstat->removed = 0; /* XXX total entropy_extract count */ + pstat->discarded = 0; /* XXX bits of entropy beyond capacity */ + pstat->generated = 0; /* XXX bits of data...fabricated? */ + + mutex_exit(&E->lock); + break; + } + case RNDGETSRCNUM: { /* Get entropy sources by number. */ + rndstat_t *stat = data; + uint32_t start = 0, i = 0; + + /* Skip if none requested; fail if too many requested. */ + if (stat->count == 0) + break; + if (stat->count > RND_MAXSTATCOUNT) + return EINVAL; + + /* + * Under the lock, find the first one, copy out as many + * as requested, and report how many we copied out. + */ + mutex_enter(&E->lock); + LIST_FOREACH(rs, &E->sources, rs_list) { + if (start++ == stat->start) + break; + } + while (i < stat->count && rs != NULL) { + rndsource_to_user(rs, &stat->source[i++]); + rs = LIST_NEXT(rs, rs_list); + } + KASSERT(i <= stat->count); + stat->count = i; + mutex_exit(&E->lock); + break; + } + case RNDGETESTNUM: { /* Get sources and estimates by number. */ + rndstat_est_t *estat = data; + uint32_t start = 0, i = 0; + + /* Skip if none requested; fail if too many requested. */ + if (estat->count == 0) + break; + if (estat->count > RND_MAXSTATCOUNT) + return EINVAL; + + /* + * Under the lock, find the first one, copy out as many + * as requested, and report how many we copied out. + */ + mutex_enter(&E->lock); + LIST_FOREACH(rs, &E->sources, rs_list) { + if (start++ == estat->start) + break; + } + while (i < estat->count && rs != NULL) { + rndsource_to_user_est(rs, &estat->source[i++]); + rs = LIST_NEXT(rs, rs_list); + } + KASSERT(i <= estat->count); + estat->count = i; + mutex_exit(&E->lock); + break; + } + case RNDGETSRCNAME: { /* Get entropy sources by name. */ + rndstat_name_t *nstat = data; + const size_t n = sizeof(rs->rs_name); + + CTASSERT(sizeof(rs->rs_name) == sizeof(nstat->name)); + + /* + * Under the lock, search by name. If found, copy it + * out; if not found, fail with ENOENT. + */ + mutex_enter(&E->lock); + LIST_FOREACH(rs, &E->sources, rs_list) { + if (strncmp(rs->rs_name, nstat->name, n) == 0) + break; + } + if (rs != NULL) + rndsource_to_user(rs, &nstat->source); + else + error = ENOENT; + mutex_exit(&E->lock); + break; + } + case RNDGETESTNAME: { /* Get sources and estimates by name. */ + rndstat_est_name_t *enstat = data; + const size_t n = sizeof(rs->rs_name); + + CTASSERT(sizeof(rs->rs_name) == sizeof(enstat->name)); + + /* + * Under the lock, search by name. If found, copy it + * out; if not found, fail with ENOENT. + */ + mutex_enter(&E->lock); + LIST_FOREACH(rs, &E->sources, rs_list) { + if (strncmp(rs->rs_name, enstat->name, n) == 0) + break; + } + if (rs != NULL) + rndsource_to_user_est(rs, &enstat->source); + else + error = ENOENT; + mutex_exit(&E->lock); + break; + } + case RNDCTL: { /* Modify entropy source flags. */ + rndctl_t *rndctl = data; + const size_t n = sizeof(rs->rs_name); + uint32_t flags; + + CTASSERT(sizeof(rs->rs_name) == sizeof(rndctl->name)); + + /* + * For each matching rndsource, either by type if + * specified or by name if not, set the masked flags. 
+ */ + mutex_enter(&E->lock); + LIST_FOREACH(rs, &E->sources, rs_list) { + if (rndctl->type != 0xff) { + if (rs->rs_type != rndctl->type) + continue; + } else { + if (strncmp(rs->rs_name, rndctl->name, n) != 0) + continue; + } + flags = rs->rs_flags & ~rndctl->mask; + flags |= rndctl->flags & rndctl->mask; + atomic_store_relaxed(&rs->rs_flags, flags); + } + mutex_exit(&E->lock); + break; + } + case RNDADDDATA: { /* Enter data into entropy pool. */ + rnddata_t *rdata = data; + unsigned entropybits = 0; + + /* + * This ioctl primarily serves as the userland + * alternative to a seed provided by a bootloader -- + * for example, from an rc.d script. We accept the + * user's entropy claim only if + * + * (a) the user is privileged, and + * (b) we have not entered a bootloader seed. + * + * under the assumption that the user may use this to + * load a seed from disk that we have already loaded + * from the bootloader, so that we don't double-count + * it. + */ + if (privileged && !atomic_load_relaxed(&E->boot_seeded)) + entropybits = MIN(ENTROPY_MAXDEBT, rdata->entropy); + + /* Enter the data. */ + entropy_enter(rdata->data, rdata->len, entropybits); + + /* + * If the ioctl claimed to provide any entropy and we + * weren't already seeded by the bootloader, mark + * ourselves as seeded by ioctl. + */ + if (entropybits) + atomic_store_relaxed(&E->ioctl_seeded, true); + break; + } + default: + error = ENOTTY; + } + + /* Return any error that may have come up. */ + return error; +} + +/* Legacy entry points */ + +void +rnd_seed(void *seed, size_t len) +{ + + if (len != sizeof(rndsave_t)) { + printf("entropy: invalid seed length: %zu," + " expected sizeof(rndsave_t) = %zu\n", + len, sizeof(rndsave_t)); + return; + } + entropy_seed(seed); +} + +void +rnd_init(void) +{ + + entropy_init(); +} + +void +rnd_init_softint(void) +{ + + entropy_init_late(); +} + +int +rnd_system_ioctl(struct file *fp, unsigned long cmd, void *data) +{ + + return entropy_ioctl(cmd, data); +} diff --git a/sys/kern/kern_rndpool.c b/sys/kern/kern_rndpool.c deleted file mode 100644 index 41e64373f5e7..000000000000 --- a/sys/kern/kern_rndpool.c +++ /dev/null @@ -1,289 +0,0 @@ -/* $NetBSD: kern_rndpool.c,v 1.18 2019/02/03 03:19:28 mrg Exp $ */ - -/*- - * Copyright (c) 1997 The NetBSD Foundation, Inc. - * All rights reserved. - * - * This code is derived from software contributed to The NetBSD Foundation - * by Michael Graff . This code uses ideas and - * algorithms from the Linux driver written by Ted Ts'o. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS - * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -__KERNEL_RCSID(0, "$NetBSD: kern_rndpool.c,v 1.18 2019/02/03 03:19:28 mrg Exp $"); - -#include -#include -#include -#include - -#include - -/* - * The random pool "taps" - */ -#define TAP1 99 -#define TAP2 59 -#define TAP3 31 -#define TAP4 9 -#define TAP5 7 - -void -rndpool_init(rndpool_t *rp) -{ - - rp->cursor = 0; - rp->rotate = 1; - - memset(&rp->stats, 0, sizeof(rp->stats)); - - rp->stats.curentropy = 0; - rp->stats.poolsize = RND_POOLWORDS; - rp->stats.threshold = RND_ENTROPY_THRESHOLD; - rp->stats.maxentropy = RND_POOLBITS; -} - -u_int32_t -rndpool_get_entropy_count(rndpool_t *rp) -{ - - return (rp->stats.curentropy); -} - -void -rndpool_set_entropy_count(rndpool_t *rp, u_int32_t count) -{ - int32_t difference = count - rp->stats.curentropy; - - if (__predict_true(difference > 0)) { - rp->stats.added += difference; - } - - rp->stats.curentropy = count; - if (rp->stats.curentropy > RND_POOLBITS) { - rp->stats.discarded += (rp->stats.curentropy - RND_POOLBITS); - rp->stats.curentropy = RND_POOLBITS; - } -} - -void rndpool_get_stats(rndpool_t *rp, void *rsp, int size) -{ - - memcpy(rsp, &rp->stats, size); -} - -/* - * The input function treats the contents of the pool as an array of - * 32 LFSR's of length RND_POOLWORDS, one per bit-plane. The LFSR's - * are clocked once in parallel, using 32-bit xor operations, for each - * word to be added. - * - * Each word to be added is xor'd with the output word of the LFSR - * array (one tap at a time). - * - * In order to facilitate distribution of entropy between the - * bit-planes, a 32-bit rotate of this result is performed prior to - * feedback. The rotation distance is incremented every RND_POOLWORDS - * clocks, by a value that is relativly prime to the word size to try - * to spread the bits throughout the pool quickly when the pool is - * empty. - * - * Each LFSR thus takes its feedback from another LFSR, and is - * effectively re-keyed by both that LFSR and the new data. Feedback - * occurs with another XOR into the new LFSR, rather than assignment, - * to avoid destroying any entropy in the destination. - * - * Even with zeros as input, the LFSR output data are never visible; - * the contents of the pool are never divulged except via a hash of - * the entire pool, so there is no information for correlation - * attacks. With rotation-based rekeying, each LFSR runs at most a few - * cycles before being permuted. However, beware of initial - * conditions when no entropy has been added. - * - * The output function also stirs the generated hash back into the - * pool, further permuting the LFSRs and spreading entropy through the - * pool. Any unknown bits anywhere in the pool are thus reflected - * across all the LFSRs after output. - * - * (The final XOR assignment into the pool for feedback is equivalent - * to an additional LFSR tap of the MSB before shifting, in the case - * where no rotation is done, once every 32 cycles. This LFSR runs for - * at most one length.) 
- */ -static inline void -rndpool_add_one_word(rndpool_t *rp, u_int32_t val) -{ - /* - * Shifting is implemented using a cursor and taps as offsets, - * added mod the size of the pool. For this reason, - * RND_POOLWORDS must be a power of two. - */ - val ^= rp->pool[(rp->cursor + TAP1) & (RND_POOLWORDS - 1)]; - val ^= rp->pool[(rp->cursor + TAP2) & (RND_POOLWORDS - 1)]; - val ^= rp->pool[(rp->cursor + TAP3) & (RND_POOLWORDS - 1)]; - val ^= rp->pool[(rp->cursor + TAP4) & (RND_POOLWORDS - 1)]; - val ^= rp->pool[(rp->cursor + TAP5) & (RND_POOLWORDS - 1)]; - if (rp->rotate != 0) - val = ((val << rp->rotate) | (val >> (32 - rp->rotate))); - rp->pool[rp->cursor++] ^= val; - - /* - * If we have looped around the pool, increment the rotate - * variable so the next value will get xored in rotated to - * a different position. - */ - if (rp->cursor == RND_POOLWORDS) { - rp->cursor = 0; - rp->rotate = (rp->rotate + 7) & 31; - } -} - -/* - * Add a buffer's worth of data to the pool. - */ -void -rndpool_add_data(rndpool_t *rp, - const void * const p, u_int32_t len, u_int32_t entropy) -{ - u_int32_t val; - const u_int8_t * buf; - - buf = p; - - for (; len > 3; len -= 4) { - (void)memcpy(&val, buf, 4); - rndpool_add_one_word(rp, val); - buf += 4; - } - - if (len != 0) { - val = 0; - switch (len) { - case 3: - val = *buf++; - /* FALLTHROUGH */ - case 2: - val = val << 8 | *buf++; - /* FALLTHROUGH */ - case 1: - val = val << 8 | *buf++; - /* FALLTHROUGH */ - } - - rndpool_add_one_word(rp, val); - } - - rp->stats.curentropy += entropy; - rp->stats.added += entropy; - - if (rp->stats.curentropy > RND_POOLBITS) { - rp->stats.discarded += (rp->stats.curentropy - RND_POOLBITS); - rp->stats.curentropy = RND_POOLBITS; - } -} - -/* - * Extract some number of bytes from the random pool, decreasing the - * estimate of randomness as each byte is extracted. - * - * Do this by hashing the pool and returning a part of the hash as - * randomness. Stir the hash back into the pool. Note that no - * secrets going back into the pool are given away here since parts of - * the hash are xored together before being returned. - * - * Honor the request from the caller to only return good data, any data, - * etc. - * - * For the "high-quality" mode, we must have as much data as the caller - * requests, and at some point we must have had at least the "threshold" - * amount of entropy in the pool. - */ -u_int32_t -rndpool_extract_data(rndpool_t *rp, void *p, u_int32_t len, u_int32_t mode) -{ - u_int i; - SHA1_CTX hash; - u_char digest[SHA1_DIGEST_LENGTH]; - u_int32_t remain, deltae, count; - u_int8_t *buf; - - buf = p; - remain = len; - - KASSERT(RND_ENTROPY_THRESHOLD * 2 <= sizeof(digest)); - - while (remain != 0 && ! (mode == RND_EXTRACT_GOOD && - remain > rp->stats.curentropy * 8)) { - /* - * While bytes are requested, compute the hash of the pool, - * and then "fold" the hash in half with XOR, keeping the - * exact hash value secret, as it will be stirred back into - * the pool. - * - * XXX this approach needs examination by competant - * cryptographers! It's rather expensive per bit but - * also involves every bit of the pool in the - * computation of every output bit.. - */ - SHA1Init(&hash); - SHA1Update(&hash, (u_int8_t *)rp->pool, RND_POOLWORDS * 4); - SHA1Final(digest, &hash); - - /* - * Stir the hash back into the pool. This guarantees - * that the next hash will generate a different value - * if no new values were added to the pool. 
- */ - CTASSERT(RND_ENTROPY_THRESHOLD * 2 == SHA1_DIGEST_LENGTH); - for (i = 0; i < SHA1_DIGEST_LENGTH/4; i++) { - u_int32_t word; - memcpy(&word, &digest[i * 4], 4); - rndpool_add_one_word(rp, word); - } - - /* XXX careful, here the THRESHOLD just controls folding */ - count = uimin(remain, RND_ENTROPY_THRESHOLD); - - for (i = 0; i < count; i++) - buf[i] = digest[i] ^ digest[i + RND_ENTROPY_THRESHOLD]; - - buf += count; - deltae = count * 8; - remain -= count; - - deltae = uimin(deltae, rp->stats.curentropy); - - rp->stats.removed += deltae; - rp->stats.curentropy -= deltae; - - if (rp->stats.curentropy == 0) - rp->stats.generated += (count * 8) - deltae; - - } - - explicit_memset(&hash, 0, sizeof(hash)); - explicit_memset(digest, 0, sizeof(digest)); - - return (len - remain); -} diff --git a/sys/kern/kern_rndq.c b/sys/kern/kern_rndq.c deleted file mode 100644 index 2bf6548f42d3..000000000000 --- a/sys/kern/kern_rndq.c +++ /dev/null @@ -1,1725 +0,0 @@ -/* $NetBSD: kern_rndq.c,v 1.95 2019/09/29 12:07:52 rhialto Exp $ */ - -/*- - * Copyright (c) 1997-2013 The NetBSD Foundation, Inc. - * All rights reserved. - * - * This code is derived from software contributed to The NetBSD Foundation - * by Michael Graff and Thor Lancelot Simon. - * This code uses ideas and algorithms from the Linux driver written by - * Ted Ts'o. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS - * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -__KERNEL_RCSID(0, "$NetBSD: kern_rndq.c,v 1.95 2019/09/29 12:07:52 rhialto Exp $"); - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#if defined(__HAVE_CPU_RNG) && !defined(_RUMPKERNEL) -#include -#endif - -#if defined(__HAVE_CPU_COUNTER) -#include -#endif - -#ifdef RND_DEBUG -#define DPRINTF(l,x) if (rnd_debug & (l)) rnd_printf x -int rnd_debug = 0; -#else -#define DPRINTF(l,x) -#endif - -/* - * list devices attached - */ -#if 0 -#define RND_VERBOSE -#endif - -#ifdef RND_VERBOSE -#define rnd_printf_verbose(fmt, ...) rnd_printf(fmt, ##__VA_ARGS__) -#else -#define rnd_printf_verbose(fmt, ...) 
((void)0) -#endif - -#ifdef RND_VERBOSE -static unsigned int deltacnt; -#endif - -/* - * This is a little bit of state information attached to each device that we - * collect entropy from. This is simply a collection buffer, and when it - * is full it will be "detached" from the source and added to the entropy - * pool after entropy is distilled as much as possible. - */ -#define RND_SAMPLE_COUNT 64 /* collect N samples, then compress */ -typedef struct _rnd_sample_t { - SIMPLEQ_ENTRY(_rnd_sample_t) next; - krndsource_t *source; - int cursor; - int entropy; - uint32_t ts[RND_SAMPLE_COUNT]; - uint32_t values[RND_SAMPLE_COUNT]; -} rnd_sample_t; - -SIMPLEQ_HEAD(rnd_sampleq, _rnd_sample_t); - -/* - * The sample queue. Samples are put into the queue and processed in a - * softint in order to limit the latency of adding a sample. - */ -static struct { - kmutex_t lock; - struct rnd_sampleq q; -} rnd_samples __cacheline_aligned; - -/* - * Memory pool for sample buffers - */ -static pool_cache_t rnd_mempc __read_mostly; - -/* - * Global entropy pool and sources. - */ -static struct { - kmutex_t lock; - rndpool_t pool; - LIST_HEAD(, krndsource) sources; - kcondvar_t cv; -} rnd_global __cacheline_aligned; - -/* - * This source is used to easily "remove" queue entries when the source - * which actually generated the events is going away. - */ -static krndsource_t rnd_source_no_collect = { - /* LIST_ENTRY list */ - .name = { 'N', 'o', 'C', 'o', 'l', 'l', 'e', 'c', 't', - 0, 0, 0, 0, 0, 0, 0 }, - .total = 0, - .type = RND_TYPE_UNKNOWN, - .flags = (RND_FLAG_NO_COLLECT | - RND_FLAG_NO_ESTIMATE), - .state = NULL, - .test_cnt = 0, - .test = NULL -}; - -krndsource_t rnd_printf_source, rnd_autoconf_source; - -static void *rnd_process __read_mostly; -static void *rnd_wakeup __read_mostly; - -static inline uint32_t rnd_counter(void); -static void rnd_intr(void *); -static void rnd_wake(void *); -static void rnd_process_events(void); -static void rnd_add_data_ts(krndsource_t *, const void *const, - uint32_t, uint32_t, uint32_t, bool); -static inline void rnd_schedule_process(void); - -int rnd_ready = 0; -int rnd_initial_entropy = 0; - -static volatile unsigned rnd_printing = 0; - -#ifdef DIAGNOSTIC -static int rnd_tested = 0; -static rngtest_t rnd_rt; -static uint8_t rnd_testbits[sizeof(rnd_rt.rt_b)]; -#endif - -static rndsave_t *boot_rsp; - -static inline void -rnd_printf(const char *fmt, ...) -{ - va_list ap; - - if (atomic_cas_uint(&rnd_printing, 0, 1) != 0) - return; - va_start(ap, fmt); - vprintf(fmt, ap); - va_end(ap); - rnd_printing = 0; -} - -/* - * Generate a 32-bit counter. - */ -static inline uint32_t -rnd_counter(void) -{ - struct bintime bt; - uint32_t ret; - -#if defined(__HAVE_CPU_COUNTER) - if (cpu_hascounter()) - return cpu_counter32(); -#endif - if (!rnd_ready) - /* Too early to call nanotime. */ - return 0; - - binuptime(&bt); - ret = bt.sec; - ret ^= bt.sec >> 32; - ret ^= bt.frac; - ret ^= bt.frac >> 32; - - return ret; -} - -/* - * We may be called from low IPL -- protect our softint. 
- */ - -static inline void -rnd_schedule_softint(void *softint) -{ - - kpreempt_disable(); - softint_schedule(softint); - kpreempt_enable(); -} - -static inline void -rnd_schedule_process(void) -{ - - if (__predict_true(rnd_process)) { - rnd_schedule_softint(rnd_process); - return; - } - rnd_process_events(); -} - -static inline void -rnd_schedule_wakeup(void) -{ - - if (__predict_true(rnd_wakeup)) { - rnd_schedule_softint(rnd_wakeup); - return; - } - rndsinks_distribute(); -} - -/* - * Tell any sources with "feed me" callbacks that we are hungry. - */ -void -rnd_getmore(size_t byteswanted) -{ - krndsource_t *rs, *next; - - /* - * Due to buffering in rnd_process_events, even if the entropy - * sources provide the requested number of bytes, users may not - * be woken because the data may be stuck in unfilled buffers. - * So ask for enough data to fill all the buffers. - * - * XXX Just get rid of this buffering and solve the - * /dev/random-as-side-channel-for-keystroke-timings a - * different way. - */ - byteswanted = MAX(byteswanted, - MAX(RND_POOLBITS/NBBY, sizeof(uint32_t)*RND_SAMPLE_COUNT)); - - mutex_spin_enter(&rnd_global.lock); - LIST_FOREACH_SAFE(rs, &rnd_global.sources, list, next) { - /* Skip if the source is disabled. */ - if (!RND_ENABLED(rs)) - continue; - - /* Skip if there's no callback. */ - if (!ISSET(rs->flags, RND_FLAG_HASCB)) - continue; - KASSERT(rs->get != NULL); - - /* Skip if there are too many users right now. */ - if (rs->refcnt == UINT_MAX) - continue; - - /* - * Hold a reference while we release rnd_global.lock to - * call the callback. The callback may in turn call - * rnd_add_data, which acquires rnd_global.lock. - */ - rs->refcnt++; - mutex_spin_exit(&rnd_global.lock); - rs->get(byteswanted, rs->getarg); - mutex_spin_enter(&rnd_global.lock); - if (--rs->refcnt == 0) - cv_broadcast(&rnd_global.cv); - - /* Dribble some goo to the console. */ - rnd_printf_verbose("rnd: entropy estimate %zu bits\n", - rndpool_get_entropy_count(&rnd_global.pool)); - rnd_printf_verbose("rnd: asking source %s for %zu bytes\n", - rs->name, byteswanted); - } - mutex_spin_exit(&rnd_global.lock); - - /* - * Check whether we got entropy samples to process. In that - * case, we may need to distribute entropy to waiters. Do - * that, if we can do it asynchronously. - * - * - Conditionally because we don't want a softint loop. - * - Asynchronously because if we did it synchronously, we may - * end up with lock recursion on rndsinks_lock. - */ - if (!SIMPLEQ_EMPTY(&rnd_samples.q) && rnd_process != NULL) - rnd_schedule_process(); -} - -/* - * Use the timing/value of the event to estimate the entropy gathered. - * If all the differentials (first, second, and third) are non-zero, return - * non-zero. If any of these are zero, return zero. - */ -static inline uint32_t -rnd_delta_estimate(rnd_delta_t *d, uint32_t v, uint32_t delta) -{ - uint32_t delta2, delta3; - - d->insamples++; - - /* - * Calculate the second and third order differentials - */ - if (delta > (uint32_t)d->dx) - delta2 = delta - (uint32_t)d->dx; - else - delta2 = (uint32_t)d->dx - delta; - - if (delta2 > (uint32_t)d->d2x) - delta3 = delta2 - (uint32_t)d->d2x; - else - delta3 = (uint32_t)d->d2x - delta2; - - d->x = v; - d->dx = delta; - d->d2x = delta2; - - /* - * If any delta is 0, we got no entropy. If all are non-zero, we - * might have something. - */ - if (delta == 0 || delta2 == 0 || delta3 == 0) - return 0; - - d->outbits++; - return 1; -} - -/* - * Delta estimator for 32-bit timestamps. 
- * Timestaps generally increase, but may wrap around to 0. - * If t decreases, it is assumed that wrap-around occurred (once). - */ -static inline uint32_t -rnd_dt_estimate(krndsource_t *rs, uint32_t t) -{ - uint32_t delta; - uint32_t ret; - rnd_delta_t *d = &rs->time_delta; - - if (t < (uint32_t)d->x) { - delta = UINT32_MAX - (uint32_t)d->x + t; - } else { - delta = t - (uint32_t)d->x; - } - - ret = rnd_delta_estimate(d, t, delta); - - KASSERT(d->x == t); - KASSERT(d->dx == delta); -#ifdef RND_VERBOSE - if (deltacnt++ % 1151 == 0) { - rnd_printf_verbose("rnd_dt_estimate: %s x = %lld, dx = %lld, " - "d2x = %lld\n", rs->name, - (int)d->x, (int)d->dx, (int)d->d2x); - } -#endif - return ret; -} - -/* - * Delta estimator for arbitrary unsigned 32 bit values. - */ -static inline uint32_t -rnd_dv_estimate(krndsource_t *rs, uint32_t v) -{ - uint32_t delta; - uint32_t ret; - rnd_delta_t *d = &rs->value_delta; - - if (v >= (uint32_t)d->x) { - delta = v - (uint32_t)d->x; - } else { - delta = (uint32_t)d->x - v; - } - - ret = rnd_delta_estimate(d, v, delta); - - KASSERT(d->x == v); - KASSERT(d->dx == delta); -#ifdef RND_VERBOSE - if (deltacnt++ % 1151 == 0) { - rnd_printf_verbose("rnd_dv_estimate: %s x = %lld, dx = %lld, " - " d2x = %lld\n", rs->name, - (long long int)d->x, - (long long int)d->dx, - (long long int)d->d2x); - } -#endif - return ret; -} - -#if defined(__HAVE_CPU_RNG) && !defined(_RUMPKERNEL) -static struct { - kmutex_t lock; /* unfortunately, must protect krndsource */ - krndsource_t source; -} rnd_cpu __cacheline_aligned; - -static void -rnd_cpu_get(size_t bytes, void *priv) -{ - krndsource_t *cpusrcp = priv; - cpu_rng_t buf[2 * RND_ENTROPY_THRESHOLD / sizeof(cpu_rng_t)]; - cpu_rng_t *bufp; - size_t cnt = __arraycount(buf); - size_t entropy = 0; - - KASSERT(cpusrcp == &rnd_cpu.source); - - for (bufp = buf; bufp < buf + cnt; bufp++) { - entropy += cpu_rng(bufp); - } - if (__predict_true(entropy)) { - mutex_spin_enter(&rnd_cpu.lock); - rnd_add_data_sync(cpusrcp, buf, sizeof(buf), entropy); - explicit_memset(buf, 0, sizeof(buf)); - mutex_spin_exit(&rnd_cpu.lock); - } -} - -#endif - -#if defined(__HAVE_CPU_COUNTER) -static struct { - kmutex_t lock; - int iter; - struct callout callout; - krndsource_t source; -} rnd_skew __cacheline_aligned; - -static void rnd_skew_intr(void *); - -static void -rnd_skew_enable(krndsource_t *rs, bool enabled) -{ - - if (enabled) { - rnd_skew_intr(rs); - } else { - callout_stop(&rnd_skew.callout); - } -} - -static void -rnd_skew_get(size_t bytes, void *priv) -{ - krndsource_t *skewsrcp __diagused = priv; - - KASSERT(skewsrcp == &rnd_skew.source); - - /* Measure 100 times */ - rnd_skew.iter = 100; - callout_schedule(&rnd_skew.callout, 1); -} - -static void -rnd_skew_intr(void *arg) -{ - /* - * Even on systems with seemingly stable clocks, the - * delta-time entropy estimator seems to think we get 1 bit here - * about every 2 calls. - * - */ - mutex_spin_enter(&rnd_skew.lock); - - if (RND_ENABLED(&rnd_skew.source)) { - int next_ticks = 1; - if (rnd_skew.iter & 1) { - rnd_add_uint32(&rnd_skew.source, rnd_counter()); - next_ticks = hz / 10; - } - if (--rnd_skew.iter > 0) { - callout_schedule(&rnd_skew.callout, next_ticks); - } - } - mutex_spin_exit(&rnd_skew.lock); -} -#endif - -void -rnd_init_softint(void) -{ - - rnd_process = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE, - rnd_intr, NULL); - rnd_wakeup = softint_establish(SOFTINT_CLOCK|SOFTINT_MPSAFE, - rnd_wake, NULL); - rnd_schedule_process(); -} - -/* - * Entropy was just added to the pool. 
If we crossed the threshold for - * the first time, set rnd_initial_entropy = 1. - */ -static void -rnd_entropy_added(void) -{ - uint32_t pool_entropy; - - KASSERT(mutex_owned(&rnd_global.lock)); - - if (__predict_true(rnd_initial_entropy)) - return; - pool_entropy = rndpool_get_entropy_count(&rnd_global.pool); - if (pool_entropy > RND_ENTROPY_THRESHOLD * NBBY) { - rnd_printf_verbose("rnd: have initial entropy (%zu)\n", - pool_entropy); - rnd_initial_entropy = 1; - } -} - -/* - * initialize the global random pool for our use. - * rnd_init() must be called very early on in the boot process, so - * the pool is ready for other devices to attach as sources. - */ -void -rnd_init(void) -{ - uint32_t c; - - if (rnd_ready) - return; - - /* - * take a counter early, hoping that there's some variance in - * the following operations - */ - c = rnd_counter(); - - rndsinks_init(); - - /* Initialize the sample queue. */ - mutex_init(&rnd_samples.lock, MUTEX_DEFAULT, IPL_VM); - SIMPLEQ_INIT(&rnd_samples.q); - - /* Initialize the global pool and sources list. */ - mutex_init(&rnd_global.lock, MUTEX_DEFAULT, IPL_VM); - rndpool_init(&rnd_global.pool); - LIST_INIT(&rnd_global.sources); - cv_init(&rnd_global.cv, "rndsrc"); - - rnd_mempc = pool_cache_init(sizeof(rnd_sample_t), 0, 0, 0, - "rndsample", NULL, IPL_VM, - NULL, NULL, NULL); - - /* - * Set resource limit. The rnd_process_events() function - * is called every tick and process the sample queue. - * Without limitation, if a lot of rnd_add_*() are called, - * all kernel memory may be eaten up. - */ - pool_cache_sethardlimit(rnd_mempc, RND_POOLBITS, NULL, 0); - - /* - * Mix *something*, *anything* into the pool to help it get started. - * However, it's not safe for rnd_counter() to call microtime() yet, - * so on some platforms we might just end up with zeros anyway. - * XXX more things to add would be nice. - */ - if (c) { - mutex_spin_enter(&rnd_global.lock); - rndpool_add_data(&rnd_global.pool, &c, sizeof(c), 1); - c = rnd_counter(); - rndpool_add_data(&rnd_global.pool, &c, sizeof(c), 1); - mutex_spin_exit(&rnd_global.lock); - } - - /* - * Attach CPU RNG if available. - */ -#if defined(__HAVE_CPU_RNG) && !defined(_RUMPKERNEL) - if (cpu_rng_init()) { - /* IPL_VM because taken while rnd_global.lock is held. */ - mutex_init(&rnd_cpu.lock, MUTEX_DEFAULT, IPL_VM); - rndsource_setcb(&rnd_cpu.source, rnd_cpu_get, &rnd_cpu.source); - rnd_attach_source(&rnd_cpu.source, "cpurng", - RND_TYPE_RNG, RND_FLAG_COLLECT_VALUE| - RND_FLAG_HASCB|RND_FLAG_HASENABLE); - rnd_cpu_get(RND_ENTROPY_THRESHOLD, &rnd_cpu.source); - } -#endif - - /* - * If we have a cycle counter, take its error with respect - * to the callout mechanism as a source of entropy, ala - * TrueRand. - * - */ -#if defined(__HAVE_CPU_COUNTER) - /* IPL_VM because taken while rnd_global.lock is held. */ - mutex_init(&rnd_skew.lock, MUTEX_DEFAULT, IPL_VM); - callout_init(&rnd_skew.callout, CALLOUT_MPSAFE); - callout_setfunc(&rnd_skew.callout, rnd_skew_intr, NULL); - rndsource_setcb(&rnd_skew.source, rnd_skew_get, &rnd_skew.source); - rndsource_setenable(&rnd_skew.source, rnd_skew_enable); - rnd_attach_source(&rnd_skew.source, "callout", RND_TYPE_SKEW, - RND_FLAG_COLLECT_VALUE|RND_FLAG_ESTIMATE_VALUE| - RND_FLAG_HASCB|RND_FLAG_HASENABLE); - rnd_skew.iter = 100; - rnd_skew_intr(NULL); -#endif - - rnd_printf_verbose("rnd: initialised (%u)%s", RND_POOLBITS, - c ? 
" with counter\n" : "\n"); - if (boot_rsp != NULL) { - mutex_spin_enter(&rnd_global.lock); - rndpool_add_data(&rnd_global.pool, boot_rsp->data, - sizeof(boot_rsp->data), - MIN(boot_rsp->entropy, RND_POOLBITS / 2)); - rnd_entropy_added(); - mutex_spin_exit(&rnd_global.lock); - rnd_printf("rnd: seeded with %d bits\n", - MIN(boot_rsp->entropy, RND_POOLBITS / 2)); - explicit_memset(boot_rsp, 0, sizeof(*boot_rsp)); - } - rnd_attach_source(&rnd_printf_source, "printf", RND_TYPE_UNKNOWN, - RND_FLAG_NO_ESTIMATE); - rnd_attach_source(&rnd_autoconf_source, "autoconf", - RND_TYPE_UNKNOWN, - RND_FLAG_COLLECT_TIME|RND_FLAG_ESTIMATE_TIME); - rnd_ready = 1; -} - -static rnd_sample_t * -rnd_sample_allocate(krndsource_t *source) -{ - rnd_sample_t *c; - - c = pool_cache_get(rnd_mempc, PR_WAITOK); - if (c == NULL) - return NULL; - - c->source = source; - c->cursor = 0; - c->entropy = 0; - - return c; -} - -/* - * Don't wait on allocation. To be used in an interrupt context. - */ -static rnd_sample_t * -rnd_sample_allocate_isr(krndsource_t *source) -{ - rnd_sample_t *c; - - c = pool_cache_get(rnd_mempc, PR_NOWAIT); - if (c == NULL) - return NULL; - - c->source = source; - c->cursor = 0; - c->entropy = 0; - - return c; -} - -static void -rnd_sample_free(rnd_sample_t *c) -{ - - explicit_memset(c, 0, sizeof(*c)); - pool_cache_put(rnd_mempc, c); -} - -/* - * Add a source to our list of sources. - */ -void -rnd_attach_source(krndsource_t *rs, const char *name, uint32_t type, - uint32_t flags) -{ - uint32_t ts; - - ts = rnd_counter(); - - strlcpy(rs->name, name, sizeof(rs->name)); - memset(&rs->time_delta, 0, sizeof(rs->time_delta)); - rs->time_delta.x = ts; - memset(&rs->value_delta, 0, sizeof(rs->value_delta)); - rs->total = 0; - - /* - * Some source setup, by type - */ - rs->test = NULL; - rs->test_cnt = -1; - - if (flags == 0) { - flags = RND_FLAG_DEFAULT; - } - - switch (type) { - case RND_TYPE_NET: /* Don't collect by default */ - flags |= (RND_FLAG_NO_COLLECT | RND_FLAG_NO_ESTIMATE); - break; - case RND_TYPE_RNG: /* Space for statistical testing */ - rs->test = kmem_alloc(sizeof(rngtest_t), KM_NOSLEEP); - rs->test_cnt = 0; - /* FALLTHRU */ - case RND_TYPE_VM: /* Process samples in bulk always */ - flags |= RND_FLAG_FAST; - break; - default: - break; - } - - rs->type = type; - rs->flags = flags; - rs->refcnt = 1; - - rs->state = rnd_sample_allocate(rs); - - mutex_spin_enter(&rnd_global.lock); - -#ifdef DIAGNOSTIC - krndsource_t *s; - LIST_FOREACH(s, &rnd_global.sources, list) { - if (s == rs) { - panic("%s: source '%s' already attached", - __func__, name); - /* NOTREACHED */ - } - } -#endif - - LIST_INSERT_HEAD(&rnd_global.sources, rs, list); - -#ifdef RND_VERBOSE - rnd_printf_verbose("rnd: %s attached as an entropy source (", - rs->name); - if (!(flags & RND_FLAG_NO_COLLECT)) { - rnd_printf_verbose("collecting"); - if (flags & RND_FLAG_NO_ESTIMATE) - rnd_printf_verbose(" without estimation"); - } else { - rnd_printf_verbose("off"); - } - rnd_printf_verbose(")\n"); -#endif - - /* - * Again, put some more initial junk in the pool. - * FreeBSD claim to have an analysis that show 4 bits of - * entropy per source-attach timestamp. I am skeptical, - * but we count 1 bit per source here. - */ - rndpool_add_data(&rnd_global.pool, &ts, sizeof(ts), 1); - mutex_spin_exit(&rnd_global.lock); -} - -/* - * Remove a source from our list of sources. 
- */ -void -rnd_detach_source(krndsource_t *source) -{ - rnd_sample_t *sample; - - mutex_spin_enter(&rnd_global.lock); - LIST_REMOVE(source, list); - if (0 < --source->refcnt) { - do { - cv_wait(&rnd_global.cv, &rnd_global.lock); - } while (0 < source->refcnt); - } - mutex_spin_exit(&rnd_global.lock); - - /* - * If there are samples queued up "remove" them from the sample queue - * by setting the source to the no-collect pseudosource. - */ - mutex_spin_enter(&rnd_samples.lock); - sample = SIMPLEQ_FIRST(&rnd_samples.q); - while (sample != NULL) { - if (sample->source == source) - sample->source = &rnd_source_no_collect; - - sample = SIMPLEQ_NEXT(sample, next); - } - mutex_spin_exit(&rnd_samples.lock); - - if (source->state) { - rnd_sample_free(source->state); - source->state = NULL; - } - - if (source->test) { - kmem_free(source->test, sizeof(rngtest_t)); - } - - rnd_printf_verbose("rnd: %s detached as an entropy source\n", - source->name); -} - -static inline uint32_t -rnd_estimate(krndsource_t *rs, uint32_t ts, uint32_t val) -{ - uint32_t entropy = 0, dt_est, dv_est; - - dt_est = rnd_dt_estimate(rs, ts); - dv_est = rnd_dv_estimate(rs, val); - - if (!(rs->flags & RND_FLAG_NO_ESTIMATE)) { - if (rs->flags & RND_FLAG_ESTIMATE_TIME) { - entropy += dt_est; - } - - if (rs->flags & RND_FLAG_ESTIMATE_VALUE) { - entropy += dv_est; - } - } - return entropy; -} - -/* - * Add a 32-bit value to the entropy pool. The rs parameter should point to - * the source-specific source structure. - */ -void -_rnd_add_uint32(krndsource_t *rs, uint32_t val) -{ - uint32_t ts; - uint32_t entropy = 0; - - if (rs->flags & RND_FLAG_NO_COLLECT) - return; - - /* - * Sample the counter as soon as possible to avoid - * entropy overestimation. - */ - ts = rnd_counter(); - - /* - * Calculate estimates - we may not use them, but if we do - * not calculate them, the estimators' history becomes invalid. - */ - entropy = rnd_estimate(rs, ts, val); - - rnd_add_data_ts(rs, &val, sizeof(val), entropy, ts, true); -} - -void -_rnd_add_uint64(krndsource_t *rs, uint64_t val) -{ - uint32_t ts; - uint32_t entropy = 0; - - if (rs->flags & RND_FLAG_NO_COLLECT) - return; - - /* - * Sample the counter as soon as possible to avoid - * entropy overestimation. - */ - ts = rnd_counter(); - - /* - * Calculate estimates - we may not use them, but if we do - * not calculate them, the estimators' history becomes invalid. - */ - entropy = rnd_estimate(rs, ts, (uint32_t)(val & (uint64_t)0xffffffff)); - - rnd_add_data_ts(rs, &val, sizeof(val), entropy, ts, true); -} - -void -rnd_add_data(krndsource_t *rs, const void *const data, uint32_t len, - uint32_t entropy) -{ - - /* - * This interface is meant for feeding data which is, - * itself, random. Don't estimate entropy based on - * timestamp, just directly add the data. 
- */ - if (__predict_false(rs == NULL)) { - mutex_spin_enter(&rnd_global.lock); - rndpool_add_data(&rnd_global.pool, data, len, entropy); - mutex_spin_exit(&rnd_global.lock); - } else { - rnd_add_data_ts(rs, data, len, entropy, rnd_counter(), true); - } -} - -void -rnd_add_data_sync(krndsource_t *rs, const void *data, uint32_t len, - uint32_t entropy) -{ - - KASSERT(rs != NULL); - rnd_add_data_ts(rs, data, len, entropy, rnd_counter(), false); -} - -static void -rnd_add_data_ts(krndsource_t *rs, const void *const data, uint32_t len, - uint32_t entropy, uint32_t ts, bool schedule) -{ - rnd_sample_t *state = NULL; - const uint8_t *p = data; - uint32_t dint; - int todo, done, filled = 0; - int sample_count; - struct rnd_sampleq tmp_samples = SIMPLEQ_HEAD_INITIALIZER(tmp_samples); - - if (rs && - (rs->flags & RND_FLAG_NO_COLLECT || - __predict_false(!(rs->flags & - (RND_FLAG_COLLECT_TIME|RND_FLAG_COLLECT_VALUE))))) { - return; - } - todo = len / sizeof(dint); - /* - * Let's try to be efficient: if we are warm, and a source - * is adding entropy at a rate of at least 1 bit every 10 seconds, - * mark it as "fast" and add its samples in bulk. - */ - if (__predict_true(rs->flags & RND_FLAG_FAST) || - (todo >= RND_SAMPLE_COUNT)) { - sample_count = RND_SAMPLE_COUNT; - } else { - if (!(rs->flags & RND_FLAG_HASCB) && - !cold && rnd_initial_entropy) { - struct timeval upt; - - getmicrouptime(&upt); - if ((upt.tv_sec > 0 && rs->total > upt.tv_sec * 10) || - (upt.tv_sec > 10 && rs->total > upt.tv_sec) || - (upt.tv_sec > 100 && - rs->total > upt.tv_sec / 10)) { - rnd_printf_verbose("rnd: source %s is fast" - " (%d samples at once," - " %d bits in %lld seconds), " - "processing samples in bulk.\n", - rs->name, todo, rs->total, - (long long int)upt.tv_sec); - rs->flags |= RND_FLAG_FAST; - } - } - sample_count = 2; - } - - /* - * Loop over data packaging it into sample buffers. - * If a sample buffer allocation fails, drop all data. - */ - for (done = 0; done < todo ; done++) { - state = rs->state; - if (state == NULL) { - state = rnd_sample_allocate_isr(rs); - if (__predict_false(state == NULL)) { - break; - } - rs->state = state; - } - - state->ts[state->cursor] = ts; - (void)memcpy(&dint, &p[done*4], 4); - state->values[state->cursor] = dint; - state->cursor++; - - if (state->cursor == sample_count) { - SIMPLEQ_INSERT_HEAD(&tmp_samples, state, next); - filled++; - rs->state = NULL; - } - } - - if (__predict_false(state == NULL)) { - while ((state = SIMPLEQ_FIRST(&tmp_samples))) { - SIMPLEQ_REMOVE_HEAD(&tmp_samples, next); - rnd_sample_free(state); - } - return; - } - - /* - * Claim all the entropy on the last one we send to - * the pool, so we don't rely on it being evenly distributed - * in the supplied data. - * - * XXX The rndpool code must accept samples with more - * XXX claimed entropy than bits for this to work right. - */ - state->entropy += entropy; - rs->total += entropy; - - /* - * If we didn't finish any sample buffers, we're done. - */ - if (!filled) { - return; - } - - mutex_spin_enter(&rnd_samples.lock); - while ((state = SIMPLEQ_FIRST(&tmp_samples))) { - SIMPLEQ_REMOVE_HEAD(&tmp_samples, next); - SIMPLEQ_INSERT_HEAD(&rnd_samples.q, state, next); - } - mutex_spin_exit(&rnd_samples.lock); - - /* Cause processing of queued samples, if caller wants it. 
*/ - if (schedule) - rnd_schedule_process(); -} - -static int -rnd_hwrng_test(rnd_sample_t *sample) -{ - krndsource_t *source = sample->source; - size_t cmplen; - uint8_t *v1, *v2; - size_t resid, totest; - - KASSERT(source->type == RND_TYPE_RNG); - - /* - * Continuous-output test: compare two halves of the - * sample buffer to each other. The sample buffer (64 ints, - * so either 256 or 512 bytes on any modern machine) should be - * much larger than a typical hardware RNG output, so this seems - * a reasonable way to do it without retaining extra data. - */ - cmplen = sizeof(sample->values) / 2; - v1 = (uint8_t *)sample->values; - v2 = (uint8_t *)sample->values + cmplen; - - if (__predict_false(!memcmp(v1, v2, cmplen))) { - rnd_printf("rnd: source \"%s\"" - " failed continuous-output test.\n", - source->name); - return 1; - } - - /* - * FIPS 140 statistical RNG test. We must accumulate 20,000 bits. - */ - if (__predict_true(source->test_cnt == -1)) { - /* already passed the test */ - return 0; - } - resid = FIPS140_RNG_TEST_BYTES - source->test_cnt; - totest = MIN(RND_SAMPLE_COUNT * 4, resid); - memcpy(source->test->rt_b + source->test_cnt, sample->values, totest); - resid -= totest; - source->test_cnt += totest; - if (resid == 0) { - strlcpy(source->test->rt_name, source->name, - sizeof(source->test->rt_name)); - if (rngtest(source->test)) { - rnd_printf("rnd: source \"%s\"" - " failed statistical test.", - source->name); - return 1; - } - source->test_cnt = -1; - explicit_memset(source->test, 0, sizeof(*source->test)); - } - return 0; -} - -/* - * Process the events in the ring buffer. Called by rnd_timeout or - * by the add routines directly if the callout has never fired (that - * is, if we are "cold" -- just booted). - * - */ -static void -rnd_process_events(void) -{ - rnd_sample_t *sample = NULL; - krndsource_t *source; - static krndsource_t *last_source; - uint32_t entropy; - size_t pool_entropy; - int wake = 0; - struct rnd_sampleq dq_samples = SIMPLEQ_HEAD_INITIALIZER(dq_samples); - struct rnd_sampleq df_samples = SIMPLEQ_HEAD_INITIALIZER(df_samples); - - /* - * Drain to the on-stack queue and drop the lock. - */ - mutex_spin_enter(&rnd_samples.lock); - while ((sample = SIMPLEQ_FIRST(&rnd_samples.q))) { - SIMPLEQ_REMOVE_HEAD(&rnd_samples.q, next); - /* - * We repeat this check here, since it is possible - * the source was disabled before we were called, but - * after the entry was queued. - */ - if (__predict_false(!(sample->source->flags & - (RND_FLAG_COLLECT_TIME|RND_FLAG_COLLECT_VALUE)))) { - SIMPLEQ_INSERT_TAIL(&df_samples, sample, next); - } else { - SIMPLEQ_INSERT_TAIL(&dq_samples, sample, next); - } - } - mutex_spin_exit(&rnd_samples.lock); - - /* Don't thrash the rndpool mtx either. Hold, add all samples. */ - mutex_spin_enter(&rnd_global.lock); - - pool_entropy = rndpool_get_entropy_count(&rnd_global.pool); - - while ((sample = SIMPLEQ_FIRST(&dq_samples))) { - int sample_count; - - SIMPLEQ_REMOVE_HEAD(&dq_samples, next); - source = sample->source; - entropy = sample->entropy; - sample_count = sample->cursor; - - /* - * Don't provide a side channel for timing attacks on - * low-rate sources: require mixing with some other - * source before we schedule a wakeup. - */ - if (!wake && - (source != last_source || source->flags & RND_FLAG_FAST)) { - wake++; - } - last_source = source; - - /* - * If the source has been disabled, ignore samples from - * it. 
- */ - if (source->flags & RND_FLAG_NO_COLLECT) - goto skip; - - /* - * Hardware generators are great but sometimes they - * have...hardware issues. Don't use any data from - * them unless it passes some tests. - */ - if (source->type == RND_TYPE_RNG) { - if (__predict_false(rnd_hwrng_test(sample))) { - source->flags |= RND_FLAG_NO_COLLECT; - rnd_printf("rnd: disabling source \"%s\".\n", - source->name); - goto skip; - } - } - - if (source->flags & RND_FLAG_COLLECT_VALUE) { - rndpool_add_data(&rnd_global.pool, sample->values, - sample_count * sizeof(sample->values[1]), - 0); - } - if (source->flags & RND_FLAG_COLLECT_TIME) { - rndpool_add_data(&rnd_global.pool, sample->ts, - sample_count * sizeof(sample->ts[1]), - 0); - } - - pool_entropy += entropy; - source->total += sample->entropy; -skip: SIMPLEQ_INSERT_TAIL(&df_samples, sample, next); - } - rndpool_set_entropy_count(&rnd_global.pool, pool_entropy); - rnd_entropy_added(); - mutex_spin_exit(&rnd_global.lock); - - /* - * If we filled the pool past the threshold, wake anyone - * waiting for entropy. - */ - if (pool_entropy > RND_ENTROPY_THRESHOLD * 8) { - wake++; - } - - /* Now we hold no locks: clean up. */ - while ((sample = SIMPLEQ_FIRST(&df_samples))) { - SIMPLEQ_REMOVE_HEAD(&df_samples, next); - rnd_sample_free(sample); - } - - /* - * Wake up any potential readers waiting. - */ - if (wake) { - rnd_schedule_wakeup(); - } -} - -static void -rnd_intr(void *arg) -{ - - rnd_process_events(); -} - -static void -rnd_wake(void *arg) -{ - - rndsinks_distribute(); -} - -static uint32_t -rnd_extract_data(void *p, uint32_t len, uint32_t flags) -{ - static int timed_in; - uint32_t retval; - - mutex_spin_enter(&rnd_global.lock); - if (__predict_false(!timed_in)) { - if (boottime.tv_sec) { - rndpool_add_data(&rnd_global.pool, &boottime, - sizeof(boottime), 0); - } - timed_in++; - } - if (__predict_false(!rnd_initial_entropy)) { - uint32_t c; - - rnd_printf_verbose("rnd: WARNING! initial entropy low (%u).\n", - rndpool_get_entropy_count(&rnd_global.pool)); - /* Try once again to put something in the pool */ - c = rnd_counter(); - rndpool_add_data(&rnd_global.pool, &c, sizeof(c), 1); - } - -#ifdef DIAGNOSTIC - while (!rnd_tested) { - int entropy_count = - rndpool_get_entropy_count(&rnd_global.pool); - rnd_printf_verbose("rnd: starting statistical RNG test," - " entropy = %d.\n", - entropy_count); - if (rndpool_extract_data(&rnd_global.pool, rnd_rt.rt_b, - sizeof(rnd_rt.rt_b), RND_EXTRACT_ANY) - != sizeof(rnd_rt.rt_b)) { - panic("rnd: could not get bits for statistical test"); - } - /* - * Stash the tested bits so we can put them back in the - * pool, restoring the entropy count. DO NOT rely on - * rngtest to maintain the bits pristine -- we could end - * up adding back non-random data claiming it were pure - * entropy. - */ - memcpy(rnd_testbits, rnd_rt.rt_b, sizeof(rnd_rt.rt_b)); - strlcpy(rnd_rt.rt_name, "entropy pool", - sizeof(rnd_rt.rt_name)); - if (rngtest(&rnd_rt)) { - /* - * The probabiliity of a Type I error is 3/10000, - * but note this can only happen at boot time. - * The relevant standard says to reset the module, - * but developers objected... 
- */ - rnd_printf("rnd: WARNING, ENTROPY POOL FAILED " - "STATISTICAL TEST!\n"); - continue; - } - explicit_memset(&rnd_rt, 0, sizeof(rnd_rt)); - rndpool_add_data(&rnd_global.pool, rnd_testbits, - sizeof(rnd_testbits), entropy_count); - explicit_memset(rnd_testbits, 0, sizeof(rnd_testbits)); - rnd_printf_verbose("rnd: statistical RNG test done," - " entropy = %d.\n", - rndpool_get_entropy_count(&rnd_global.pool)); - rnd_tested++; - } -#endif - retval = rndpool_extract_data(&rnd_global.pool, p, len, flags); - mutex_spin_exit(&rnd_global.lock); - - return retval; -} - -/* - * Fill the buffer with as much entropy as we can. Return true if it - * has full entropy and false if not. - */ -bool -rnd_extract(void *buffer, size_t bytes) -{ - const size_t extracted = rnd_extract_data(buffer, bytes, - RND_EXTRACT_GOOD); - - if (extracted < bytes) { - rnd_getmore(bytes - extracted); - (void)rnd_extract_data((uint8_t *)buffer + extracted, - bytes - extracted, RND_EXTRACT_ANY); - return false; - } - - return true; -} - -/* - * If we have as much entropy as is requested, fill the buffer with it - * and return true. Otherwise, leave the buffer alone and return - * false. - */ - -CTASSERT(RND_ENTROPY_THRESHOLD <= 0xffffffffUL); -CTASSERT(RNDSINK_MAX_BYTES <= (0xffffffffUL - RND_ENTROPY_THRESHOLD)); -CTASSERT((RNDSINK_MAX_BYTES + RND_ENTROPY_THRESHOLD) <= - (0xffffffffUL / NBBY)); - -bool -rnd_tryextract(void *buffer, size_t bytes) -{ - uint32_t bits_needed, bytes_requested; - - KASSERT(bytes <= RNDSINK_MAX_BYTES); - bits_needed = ((bytes + RND_ENTROPY_THRESHOLD) * NBBY); - - mutex_spin_enter(&rnd_global.lock); - if (bits_needed <= rndpool_get_entropy_count(&rnd_global.pool)) { - const uint32_t extracted __diagused = - rndpool_extract_data(&rnd_global.pool, buffer, bytes, - RND_EXTRACT_GOOD); - - KASSERT(extracted == bytes); - bytes_requested = 0; - } else { - /* XXX Figure the threshold into this... */ - bytes_requested = howmany((bits_needed - - rndpool_get_entropy_count(&rnd_global.pool)), NBBY); - KASSERT(0 < bytes_requested); - } - mutex_spin_exit(&rnd_global.lock); - - if (0 < bytes_requested) - rnd_getmore(bytes_requested); - - return bytes_requested == 0; -} - -void -rnd_seed(void *base, size_t len) -{ - SHA1_CTX s; - uint8_t digest[SHA1_DIGEST_LENGTH]; - - if (len != sizeof(*boot_rsp)) { - rnd_printf("rnd: bad seed length %d\n", (int)len); - return; - } - - boot_rsp = (rndsave_t *)base; - SHA1Init(&s); - SHA1Update(&s, (uint8_t *)&boot_rsp->entropy, - sizeof(boot_rsp->entropy)); - SHA1Update(&s, boot_rsp->data, sizeof(boot_rsp->data)); - SHA1Final(digest, &s); - - if (memcmp(digest, boot_rsp->digest, sizeof(digest))) { - rnd_printf("rnd: bad seed checksum\n"); - return; - } - - /* - * It's not really well-defined whether bootloader-supplied - * modules run before or after rnd_init(). Handle both cases. 
- */ - if (rnd_ready) { - rnd_printf_verbose("rnd: ready," - " feeding in seed data directly.\n"); - mutex_spin_enter(&rnd_global.lock); - rndpool_add_data(&rnd_global.pool, boot_rsp->data, - sizeof(boot_rsp->data), - MIN(boot_rsp->entropy, RND_POOLBITS / 2)); - explicit_memset(boot_rsp, 0, sizeof(*boot_rsp)); - mutex_spin_exit(&rnd_global.lock); - } else { - rnd_printf_verbose("rnd: not ready, deferring seed feed.\n"); - } -} - -static void -krndsource_to_rndsource(krndsource_t *kr, rndsource_t *r) -{ - - memset(r, 0, sizeof(*r)); - strlcpy(r->name, kr->name, sizeof(r->name)); - r->total = kr->total; - r->type = kr->type; - r->flags = kr->flags; -} - -static void -krndsource_to_rndsource_est(krndsource_t *kr, rndsource_est_t *re) -{ - - memset(re, 0, sizeof(*re)); - krndsource_to_rndsource(kr, &re->rt); - re->dt_samples = kr->time_delta.insamples; - re->dt_total = kr->time_delta.outbits; - re->dv_samples = kr->value_delta.insamples; - re->dv_total = kr->value_delta.outbits; -} - -static void -krs_setflags(krndsource_t *kr, uint32_t flags, uint32_t mask) -{ - uint32_t oflags = kr->flags; - - kr->flags &= ~mask; - kr->flags |= (flags & mask); - - if (oflags & RND_FLAG_HASENABLE && - ((oflags & RND_FLAG_NO_COLLECT) != - (flags & RND_FLAG_NO_COLLECT))) { - kr->enable(kr, !(flags & RND_FLAG_NO_COLLECT)); - } -} - -int -rnd_system_ioctl(struct file *fp, u_long cmd, void *addr) -{ - krndsource_t *kr; - rndstat_t *rst; - rndstat_name_t *rstnm; - rndstat_est_t *rset; - rndstat_est_name_t *rsetnm; - rndctl_t *rctl; - rnddata_t *rnddata; - uint32_t count, start; - int ret = 0; - int estimate_ok = 0, estimate = 0; - - switch (cmd) { - case RNDGETENTCNT: - break; - - case RNDGETPOOLSTAT: - case RNDGETSRCNUM: - case RNDGETSRCNAME: - case RNDGETESTNUM: - case RNDGETESTNAME: - ret = kauth_authorize_device(curlwp->l_cred, - KAUTH_DEVICE_RND_GETPRIV, NULL, NULL, NULL, NULL); - if (ret) - return ret; - break; - - case RNDCTL: - ret = kauth_authorize_device(curlwp->l_cred, - KAUTH_DEVICE_RND_SETPRIV, NULL, NULL, NULL, NULL); - if (ret) - return ret; - break; - - case RNDADDDATA: - ret = kauth_authorize_device(curlwp->l_cred, - KAUTH_DEVICE_RND_ADDDATA, NULL, NULL, NULL, NULL); - if (ret) - return ret; - estimate_ok = !kauth_authorize_device(curlwp->l_cred, - KAUTH_DEVICE_RND_ADDDATA_ESTIMATE, NULL, NULL, NULL, NULL); - break; - - default: - MODULE_HOOK_CALL(rnd_ioctl_50_hook, (fp, cmd, addr), - enosys(), ret); -#if defined(_LP64) - if (ret == ENOSYS) - MODULE_HOOK_CALL(rnd_ioctl32_50_hook, (fp, cmd, addr), - enosys(), ret); -#endif - if (ret == ENOSYS) - ret = ENOTTY; - return ret; - } - - switch (cmd) { - case RNDGETENTCNT: - mutex_spin_enter(&rnd_global.lock); - *(uint32_t *)addr = - rndpool_get_entropy_count(&rnd_global.pool); - mutex_spin_exit(&rnd_global.lock); - break; - - case RNDGETPOOLSTAT: - mutex_spin_enter(&rnd_global.lock); - rndpool_get_stats(&rnd_global.pool, addr, - sizeof(rndpoolstat_t)); - mutex_spin_exit(&rnd_global.lock); - break; - - case RNDGETSRCNUM: - rst = (rndstat_t *)addr; - - if (rst->count == 0) - break; - - if (rst->count > RND_MAXSTATCOUNT) - return EINVAL; - - mutex_spin_enter(&rnd_global.lock); - /* - * Find the starting source by running through the - * list of sources. - */ - kr = LIST_FIRST(&rnd_global.sources); - start = rst->start; - while (kr != NULL && start >= 1) { - kr = LIST_NEXT(kr, list); - start--; - } - - /* - * Return up to as many structures as the user asked - * for. If we run out of sources, a count of zero - * will be returned, without an error. 
- */ - for (count = 0; count < rst->count && kr != NULL; count++) { - krndsource_to_rndsource(kr, &rst->source[count]); - kr = LIST_NEXT(kr, list); - } - - rst->count = count; - - mutex_spin_exit(&rnd_global.lock); - break; - - case RNDGETESTNUM: - rset = (rndstat_est_t *)addr; - - if (rset->count == 0) - break; - - if (rset->count > RND_MAXSTATCOUNT) - return EINVAL; - - mutex_spin_enter(&rnd_global.lock); - /* - * Find the starting source by running through the - * list of sources. - */ - kr = LIST_FIRST(&rnd_global.sources); - start = rset->start; - while (kr != NULL && start > 0) { - kr = LIST_NEXT(kr, list); - start--; - } - - /* - * Return up to as many structures as the user asked - * for. If we run out of sources, a count of zero - * will be returned, without an error. - */ - for (count = 0; count < rset->count && kr != NULL; count++) { - krndsource_to_rndsource_est(kr, &rset->source[count]); - kr = LIST_NEXT(kr, list); - } - - rset->count = count; - - mutex_spin_exit(&rnd_global.lock); - break; - - case RNDGETSRCNAME: - /* - * Scan through the list, trying to find the name. - */ - mutex_spin_enter(&rnd_global.lock); - rstnm = (rndstat_name_t *)addr; - kr = LIST_FIRST(&rnd_global.sources); - while (kr != NULL) { - if (strncmp(kr->name, rstnm->name, - MIN(sizeof(kr->name), - sizeof(rstnm->name))) == 0) { - krndsource_to_rndsource(kr, &rstnm->source); - mutex_spin_exit(&rnd_global.lock); - return 0; - } - kr = LIST_NEXT(kr, list); - } - mutex_spin_exit(&rnd_global.lock); - - ret = ENOENT; /* name not found */ - - break; - - case RNDGETESTNAME: - /* - * Scan through the list, trying to find the name. - */ - mutex_spin_enter(&rnd_global.lock); - rsetnm = (rndstat_est_name_t *)addr; - kr = LIST_FIRST(&rnd_global.sources); - while (kr != NULL) { - if (strncmp(kr->name, rsetnm->name, - MIN(sizeof(kr->name), sizeof(rsetnm->name))) - == 0) { - krndsource_to_rndsource_est(kr, - &rsetnm->source); - mutex_spin_exit(&rnd_global.lock); - return 0; - } - kr = LIST_NEXT(kr, list); - } - mutex_spin_exit(&rnd_global.lock); - - ret = ENOENT; /* name not found */ - - break; - - case RNDCTL: - /* - * Set flags to enable/disable entropy counting and/or - * collection. - */ - mutex_spin_enter(&rnd_global.lock); - rctl = (rndctl_t *)addr; - kr = LIST_FIRST(&rnd_global.sources); - - /* - * Flags set apply to all sources of this type. - */ - if (rctl->type != 0xff) { - while (kr != NULL) { - if (kr->type == rctl->type) { - krs_setflags(kr, rctl->flags, - rctl->mask); - } - kr = LIST_NEXT(kr, list); - } - mutex_spin_exit(&rnd_global.lock); - return 0; - } - - /* - * scan through the list, trying to find the name - */ - while (kr != NULL) { - if (strncmp(kr->name, rctl->name, - MIN(sizeof(kr->name), sizeof(rctl->name))) - == 0) { - krs_setflags(kr, rctl->flags, rctl->mask); - mutex_spin_exit(&rnd_global.lock); - return 0; - } - kr = LIST_NEXT(kr, list); - } - - mutex_spin_exit(&rnd_global.lock); - ret = ENOENT; /* name not found */ - - break; - - case RNDADDDATA: - /* - * Don't seed twice if our bootloader has - * seed loading support. - */ - if (!boot_rsp) { - rnddata = (rnddata_t *)addr; - - if (rnddata->len > sizeof(rnddata->data)) - return EINVAL; - - if (estimate_ok) { - /* - * Do not accept absurd entropy estimates, and - * do not flood the pool with entropy such that - * new samples are discarded henceforth. 
- */ - estimate = MIN((rnddata->len * NBBY) / 2, - MIN(rnddata->entropy, RND_POOLBITS / 2)); - } else { - estimate = 0; - } - - mutex_spin_enter(&rnd_global.lock); - rndpool_add_data(&rnd_global.pool, rnddata->data, - rnddata->len, estimate); - rnd_entropy_added(); - mutex_spin_exit(&rnd_global.lock); - - rndsinks_distribute(); - } else { - rnd_printf_verbose("rnd" - ": already seeded by boot loader\n"); - } - break; - - default: - return ENOTTY; - } - - return ret; -} diff --git a/sys/kern/kern_rndsink.c b/sys/kern/kern_rndsink.c deleted file mode 100644 index 1ad142425ecb..000000000000 --- a/sys/kern/kern_rndsink.c +++ /dev/null @@ -1,254 +0,0 @@ -/* $NetBSD: kern_rndsink.c,v 1.17 2016/05/21 15:33:40 riastradh Exp $ */ - -/*- - * Copyright (c) 2013 The NetBSD Foundation, Inc. - * All rights reserved. - * - * This code is derived from software contributed to The NetBSD Foundation - * by Taylor R. Campbell. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS - * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -__KERNEL_RCSID(0, "$NetBSD: kern_rndsink.c,v 1.17 2016/05/21 15:33:40 riastradh Exp $"); - -#include -#include -#include -#include -#include -#include -#include - -#include - -enum rsink_state { - RNDSINK_IDLE, /* no callback in progress */ - RNDSINK_QUEUED, /* queued for callback */ - RNDSINK_IN_FLIGHT, /* callback called */ - RNDSINK_REQUEUED, /* queued again before callback done */ - RNDSINK_DEAD, /* destroyed */ -}; - -struct rndsink { - /* Callback state. */ - enum rsink_state rsink_state; - - /* Entry on the queue of rndsinks, iff in the RNDSINK_QUEUED state. */ - TAILQ_ENTRY(rndsink) rsink_entry; - - /* rndsink_create parameters. */ - unsigned int rsink_bytes; - rndsink_callback_t *rsink_callback; - void *rsink_arg; -}; - -static struct { - kmutex_t lock; - kcondvar_t cv; - TAILQ_HEAD(, rndsink) q; -} rndsinks __cacheline_aligned; - -void -rndsinks_init(void) -{ - - /* - * This mutex must be at an ipl as high as the highest ipl of - * anyone who wants to call rndsink_request. - * - * XXX Call this IPL_RND, perhaps. 
- */ - mutex_init(&rndsinks.lock, MUTEX_DEFAULT, IPL_VM); - cv_init(&rndsinks.cv, "rndsink"); - TAILQ_INIT(&rndsinks.q); -} - -void -rndsinks_distribute(void) -{ - uint8_t buffer[RNDSINK_MAX_BYTES]; - struct rndsink *rndsink; - - explicit_memset(buffer, 0, sizeof(buffer)); /* paranoia */ - - mutex_spin_enter(&rndsinks.lock); - while ((rndsink = TAILQ_FIRST(&rndsinks.q)) != NULL) { - KASSERT(rndsink->rsink_state == RNDSINK_QUEUED); - - /* Bail if we can't get some entropy for this rndsink. */ - if (!rnd_tryextract(buffer, rndsink->rsink_bytes)) - break; - - /* - * Got some entropy. Take the sink off the queue and - * feed the entropy to the callback, with rndsinks_lock - * dropped. While running the callback, lock out - * rndsink_destroy by marking the sink in flight. - */ - TAILQ_REMOVE(&rndsinks.q, rndsink, rsink_entry); - rndsink->rsink_state = RNDSINK_IN_FLIGHT; - mutex_spin_exit(&rndsinks.lock); - - (*rndsink->rsink_callback)(rndsink->rsink_arg, buffer, - rndsink->rsink_bytes); - explicit_memset(buffer, 0, rndsink->rsink_bytes); - - mutex_spin_enter(&rndsinks.lock); - - /* - * If, while the callback was running, anyone requested - * it be queued up again, do so now. Otherwise, idle. - * Either way, it is now safe to destroy, so wake the - * pending rndsink_destroy, if there is one. - */ - if (rndsink->rsink_state == RNDSINK_REQUEUED) { - TAILQ_INSERT_TAIL(&rndsinks.q, rndsink, rsink_entry); - rndsink->rsink_state = RNDSINK_QUEUED; - } else { - KASSERT(rndsink->rsink_state == RNDSINK_IN_FLIGHT); - rndsink->rsink_state = RNDSINK_IDLE; - } - cv_broadcast(&rndsinks.cv); - } - mutex_spin_exit(&rndsinks.lock); - - explicit_memset(buffer, 0, sizeof(buffer)); /* paranoia */ -} - -static void -rndsinks_enqueue(struct rndsink *rndsink) -{ - - KASSERT(mutex_owned(&rndsinks.lock)); - - /* Kick on-demand entropy sources. */ - rnd_getmore(rndsink->rsink_bytes); - - /* Ensure this rndsink is on the queue. */ - switch (rndsink->rsink_state) { - case RNDSINK_IDLE: - /* Not on the queue and nobody is handling it. */ - TAILQ_INSERT_TAIL(&rndsinks.q, rndsink, rsink_entry); - rndsink->rsink_state = RNDSINK_QUEUED; - break; - - case RNDSINK_QUEUED: - /* Already on the queue. */ - break; - - case RNDSINK_IN_FLIGHT: - /* Someone is handling it. Ask to queue it up again. */ - rndsink->rsink_state = RNDSINK_REQUEUED; - break; - - case RNDSINK_REQUEUED: - /* Already asked to queue it up again. */ - break; - - case RNDSINK_DEAD: - panic("requesting entropy from dead rndsink: %p", rndsink); - - default: - panic("rndsink %p in unknown state: %d", rndsink, - (int)rndsink->rsink_state); - } -} - -struct rndsink * -rndsink_create(size_t bytes, rndsink_callback_t *callback, void *arg) -{ - struct rndsink *const rndsink = kmem_alloc(sizeof(*rndsink), KM_SLEEP); - - KASSERT(bytes <= RNDSINK_MAX_BYTES); - - rndsink->rsink_state = RNDSINK_IDLE; - rndsink->rsink_bytes = bytes; - rndsink->rsink_callback = callback; - rndsink->rsink_arg = arg; - - return rndsink; -} - -void -rndsink_destroy(struct rndsink *rndsink) -{ - - /* - * Make sure the rndsink is off the queue, and if it's already - * in flight, wait for the callback to complete. 
- */ - mutex_spin_enter(&rndsinks.lock); - while (rndsink->rsink_state != RNDSINK_IDLE) { - switch (rndsink->rsink_state) { - case RNDSINK_QUEUED: - TAILQ_REMOVE(&rndsinks.q, rndsink, rsink_entry); - rndsink->rsink_state = RNDSINK_IDLE; - break; - - case RNDSINK_IN_FLIGHT: - case RNDSINK_REQUEUED: - cv_wait(&rndsinks.cv, &rndsinks.lock); - break; - - case RNDSINK_DEAD: - panic("destroying dead rndsink: %p", rndsink); - - default: - panic("rndsink %p in unknown state: %d", rndsink, - (int)rndsink->rsink_state); - } - } - rndsink->rsink_state = RNDSINK_DEAD; - mutex_spin_exit(&rndsinks.lock); - - kmem_free(rndsink, sizeof(*rndsink)); -} - -void -rndsink_schedule(struct rndsink *rndsink) -{ - - /* Optimistically check without the lock whether we're queued. */ - if ((rndsink->rsink_state != RNDSINK_QUEUED) && - (rndsink->rsink_state != RNDSINK_REQUEUED)) { - mutex_spin_enter(&rndsinks.lock); - rndsinks_enqueue(rndsink); - mutex_spin_exit(&rndsinks.lock); - } -} - -bool -rndsink_request(struct rndsink *rndsink, void *buffer, size_t bytes) -{ - - KASSERT(bytes == rndsink->rsink_bytes); - - mutex_spin_enter(&rndsinks.lock); - const bool full_entropy = rnd_extract(buffer, bytes); - if (!full_entropy) - rndsinks_enqueue(rndsink); - mutex_spin_exit(&rndsinks.lock); - - return full_entropy; -} diff --git a/sys/kern/subr_autoconf.c b/sys/kern/subr_autoconf.c index 34499ede31d5..896bacc9a6e0 100644 --- a/sys/kern/subr_autoconf.c +++ b/sys/kern/subr_autoconf.c @@ -121,7 +121,7 @@ __KERNEL_RCSID(0, "$NetBSD: subr_autoconf.c,v 1.265 2018/12/01 02:08:16 msaitoh /* * Device autoconfiguration timings are mixed into the entropy pool. */ -extern krndsource_t rnd_autoconf_source; +krndsource_t rnd_autoconf_source; /* * ioconf.c exports exactly two names: cfdata and cfroots. All system @@ -356,6 +356,9 @@ config_init(void) initcftable.ct_cfdata = cfdata; TAILQ_INSERT_TAIL(&allcftables, &initcftable, ct_list); + rnd_attach_source(&rnd_autoconf_source, "autoconf", RND_TYPE_UNKNOWN, + RND_FLAG_COLLECT_TIME|RND_FLAG_ESTIMATE_TIME); + config_initialized = true; } diff --git a/sys/kern/subr_cprng.c b/sys/kern/subr_cprng.c index dbd0de925bfe..f1a67b26fdb7 100644 --- a/sys/kern/subr_cprng.c +++ b/sys/kern/subr_cprng.c @@ -1,11 +1,11 @@ -/* $NetBSD: subr_cprng.c,v 1.34 2019/12/04 05:36:34 riastradh Exp $ */ +/* $NetBSD$ */ /*- - * Copyright (c) 2011-2013 The NetBSD Foundation, Inc. + * Copyright (c) 2019 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation - * by Thor Lancelot Simon and Taylor R. Campbell. + * by Taylor R. Campbell. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -29,539 +29,278 @@ * POSSIBILITY OF SUCH DAMAGE. */ +/* + * cprng_strong + * + * Per-CPU NIST Hash_DRBG, reseeded automatically from the entropy + * pool when we transition to full entropy, never blocking. This + * is slightly different from the old cprng_strong API, but the + * only users of the old one fell into three categories: + * + * 1. never-blocking, oughta-be-per-CPU (kern_cprng, sysctl_prng) + * 2. never-blocking, used per-CPU anyway (/dev/urandom short reads) + * 3. /dev/random + * + * This code serves the first two categories without having extra + * logic for /dev/random. + * + * kern_cprng - available at IPL_VM or lower + * user_cprng - available only at IPL_NONE in thread context + * + * The name kern_cprng is for hysterical raisins. 
The name + * user_cprng serves only to contrast with kern_cprng. + */ + #include -__KERNEL_RCSID(0, "$NetBSD: subr_cprng.c,v 1.34 2019/12/04 05:36:34 riastradh Exp $"); +__KERNEL_RCSID(0, "$NetBSD$"); -#include #include -#include #include +#include +#include #include -#include /* XXX struct knote */ -#include /* XXX FNONBLOCK */ -#include +#include +#include #include -#include -#include #include -#include /* XXX POLLIN/POLLOUT/&c. */ -#include -#include #include -#include +#include #include -#if defined(__HAVE_CPU_COUNTER) -#include -#endif - -static int sysctl_kern_urnd(SYSCTLFN_PROTO); -static int sysctl_kern_arnd(SYSCTLFN_PROTO); - -static void cprng_strong_generate(struct cprng_strong *, void *, size_t); -static void cprng_strong_reseed(struct cprng_strong *); -static void cprng_strong_reseed_from(struct cprng_strong *, const void *, - size_t, bool); - -static rndsink_callback_t cprng_strong_rndsink_callback; - -void -cprng_init(void) -{ - static struct sysctllog *random_sysctllog; - - if (nist_hash_drbg_initialize() != 0) - panic("NIST Hash_DRBG failed self-test"); - - sysctl_createv(&random_sysctllog, 0, NULL, NULL, - CTLFLAG_PERMANENT, - CTLTYPE_INT, "urandom", - SYSCTL_DESCR("Random integer value"), - sysctl_kern_urnd, 0, NULL, 0, - CTL_KERN, KERN_URND, CTL_EOL); - sysctl_createv(&random_sysctllog, 0, NULL, NULL, - CTLFLAG_PERMANENT, - CTLTYPE_INT, "arandom", - SYSCTL_DESCR("n bytes of random data"), - sysctl_kern_arnd, 0, NULL, 0, - CTL_KERN, KERN_ARND, CTL_EOL); -} - -static inline uint32_t -cprng_counter(void) -{ - struct timeval tv; - -#if defined(__HAVE_CPU_COUNTER) - if (cpu_hascounter()) - return cpu_counter32(); -#endif - if (__predict_false(cold)) { - static int ctr; - /* microtime unsafe if clock not running yet */ - return ctr++; - } - getmicrotime(&tv); - return (tv.tv_sec * 1000000 + tv.tv_usec); -} - +/* + * struct cprng_strong + */ struct cprng_strong { - char cs_name[16]; - int cs_flags; - kmutex_t cs_lock; - percpu_t *cs_percpu; - kcondvar_t cs_cv; - struct selinfo cs_selq; - struct rndsink *cs_rndsink; - bool cs_ready; - NIST_HASH_DRBG cs_drbg; - - /* XXX Kludge for /dev/random `information-theoretic' properties. */ - unsigned int cs_remaining; + struct percpu *cs_percpu; /* struct cprng_cpu */ + ipl_cookie_t cs_iplcookie; }; -struct cprng_strong * -cprng_strong_create(const char *name, int ipl, int flags) -{ - const uint32_t cc = cprng_counter(); - struct cprng_strong *const cprng = kmem_alloc(sizeof(*cprng), - KM_SLEEP); +/* + * struct cprng_cpu + * + * Per-CPU state for a cprng_strong. The DRBG is allocated + * separately because percpu(9) sometimes moves per-CPU objects + * around without zeroing them. + */ +struct cprng_cpu { + struct nist_hash_drbg *cc_drbg; + unsigned cc_epoch; +}; - /* - * rndsink_request takes a spin lock at IPL_VM, so we can be no - * higher than that. - */ - KASSERT(ipl != IPL_SCHED && ipl != IPL_HIGH); - - /* Initialize the easy fields. */ - memset(cprng->cs_name, 0, sizeof(cprng->cs_name)); - (void)strlcpy(cprng->cs_name, name, sizeof(cprng->cs_name)); - cprng->cs_flags = flags; - mutex_init(&cprng->cs_lock, MUTEX_DEFAULT, ipl); - cv_init(&cprng->cs_cv, cprng->cs_name); - selinit(&cprng->cs_selq); - cprng->cs_rndsink = rndsink_create(NIST_HASH_DRBG_MIN_SEEDLEN_BYTES, - &cprng_strong_rndsink_callback, cprng); - - /* Get some initial entropy. Record whether it is full entropy. 
*/ - uint8_t seed[NIST_HASH_DRBG_MIN_SEEDLEN_BYTES]; - mutex_enter(&cprng->cs_lock); - cprng->cs_ready = rndsink_request(cprng->cs_rndsink, seed, - sizeof(seed)); - if (nist_hash_drbg_instantiate(&cprng->cs_drbg, seed, sizeof(seed), - &cc, sizeof(cc), cprng->cs_name, sizeof(cprng->cs_name))) - /* XXX Fix nist_hash_drbg API so this can't happen. */ - panic("cprng %s: NIST Hash_DRBG instantiation failed", - cprng->cs_name); - explicit_memset(seed, 0, sizeof(seed)); - - if (ISSET(flags, CPRNG_HARD)) - cprng->cs_remaining = NIST_HASH_DRBG_MIN_SEEDLEN_BYTES; - else - cprng->cs_remaining = 0; - - if (!cprng->cs_ready && !ISSET(flags, CPRNG_INIT_ANY)) - printf("cprng %s: creating with partial entropy\n", - cprng->cs_name); - mutex_exit(&cprng->cs_lock); +static int sysctl_kern_urandom(SYSCTLFN_ARGS); +static int sysctl_kern_arandom(SYSCTLFN_ARGS); +static void cprng_init_cpu(void *, void *, struct cpu_info *); +static void cprng_fini_cpu(void *, void *, struct cpu_info *); - return cprng; -} +/* Well-known CPRNG instances */ +struct cprng_strong *kern_cprng __read_mostly; /* IPL_VM */ +struct cprng_strong *user_cprng __read_mostly; /* IPL_NONE */ + +static struct sysctllog *cprng_sysctllog __read_mostly; + +static struct evcnt cprng_strong_intr_evcnt = + EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "cprng strong", "intr"); +EVCNT_ATTACH_STATIC(cprng_strong_intr_evcnt); +static struct evcnt cprng_strong_reseed_evcnt = + EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "cprng strong", "reseed"); +EVCNT_ATTACH_STATIC(cprng_strong_reseed_evcnt); void -cprng_strong_destroy(struct cprng_strong *cprng) +cprng_init(void) { /* - * Destroy the rndsink first to prevent calls to the callback. + * Create CPRNG instances at two IPLs: IPL_VM for kernel use + * that may occur inside IPL_VM interrupt handlers (!!??!?!?), + * and IPL_NONE for userland use which need not block + * interrupts. */ - rndsink_destroy(cprng->cs_rndsink); - - KASSERT(!cv_has_waiters(&cprng->cs_cv)); -#if 0 - KASSERT(!select_has_waiters(&cprng->cs_selq)) /* XXX ? */ -#endif - - nist_hash_drbg_destroy(&cprng->cs_drbg); - seldestroy(&cprng->cs_selq); - cv_destroy(&cprng->cs_cv); - mutex_destroy(&cprng->cs_lock); - - explicit_memset(cprng, 0, sizeof(*cprng)); /* paranoia */ - kmem_free(cprng, sizeof(*cprng)); + kern_cprng = cprng_strong_create("kern", IPL_VM, 0); + user_cprng = cprng_strong_create("user", IPL_NONE, 0); + + /* Create kern.urandom and kern.arandom sysctl nodes. */ + sysctl_createv(&cprng_sysctllog, 0, NULL, NULL, + CTLFLAG_PERMANENT|CTLFLAG_READONLY, CTLTYPE_INT, "urandom", + SYSCTL_DESCR("Independent uniform random 32-bit integer"), + sysctl_kern_urandom, 0, NULL, 0, CTL_KERN, KERN_URND, CTL_EOL); + sysctl_createv(&cprng_sysctllog, 0, NULL, NULL, + CTLFLAG_PERMANENT|CTLFLAG_READONLY, CTLTYPE_INT /*lie*/, "arandom", + SYSCTL_DESCR("Independent uniform random bytes, up to 256 bytes"), + sysctl_kern_arandom, 0, NULL, 0, CTL_KERN, KERN_ARND, CTL_EOL); } /* - * Generate some data from cprng. Block or return zero bytes, - * depending on flags & FNONBLOCK, if cprng was created without - * CPRNG_REKEY_ANY. + * sysctl kern.urandom + * + * Independent uniform random 32-bit integer. Read-only. */ -size_t -cprng_strong(struct cprng_strong *cprng, void *buffer, size_t bytes, int flags) -{ - size_t result; - - /* Caller must loop for more than CPRNG_MAX_LEN bytes. 
*/ - bytes = MIN(bytes, CPRNG_MAX_LEN); - - mutex_enter(&cprng->cs_lock); - - if (ISSET(cprng->cs_flags, CPRNG_REKEY_ANY)) { - if (!cprng->cs_ready) - cprng_strong_reseed(cprng); - } else { - while (!cprng->cs_ready) { - if (ISSET(flags, FNONBLOCK) || - !ISSET(cprng->cs_flags, CPRNG_USE_CV) || - cv_wait_sig(&cprng->cs_cv, &cprng->cs_lock)) { - result = 0; - goto out; - } - } - } - - /* - * Debit the entropy if requested. - * - * XXX Kludge for /dev/random `information-theoretic' properties. - */ - if (__predict_false(ISSET(cprng->cs_flags, CPRNG_HARD))) { - KASSERT(0 < cprng->cs_remaining); - KASSERT(cprng->cs_remaining <= - NIST_HASH_DRBG_MIN_SEEDLEN_BYTES); - if (bytes < cprng->cs_remaining) { - cprng->cs_remaining -= bytes; - } else { - bytes = cprng->cs_remaining; - cprng->cs_remaining = NIST_HASH_DRBG_MIN_SEEDLEN_BYTES; - cprng->cs_ready = false; - rndsink_schedule(cprng->cs_rndsink); - } - KASSERT(bytes <= NIST_HASH_DRBG_MIN_SEEDLEN_BYTES); - KASSERT(0 < cprng->cs_remaining); - KASSERT(cprng->cs_remaining <= - NIST_HASH_DRBG_MIN_SEEDLEN_BYTES); - } - - cprng_strong_generate(cprng, buffer, bytes); - result = bytes; - -out: mutex_exit(&cprng->cs_lock); - return result; -} - -static void -filt_cprng_detach(struct knote *kn) +static int +sysctl_kern_urandom(SYSCTLFN_ARGS) { - struct cprng_strong *const cprng = kn->kn_hook; + struct sysctlnode node = *rnode; + int v; + int error; - mutex_enter(&cprng->cs_lock); - SLIST_REMOVE(&cprng->cs_selq.sel_klist, kn, knote, kn_selnext); - mutex_exit(&cprng->cs_lock); -} + /* Generate an int's worth of data. */ + cprng_strong(user_cprng, &v, sizeof v, 0); -static int -filt_cprng_read_event(struct knote *kn, long hint) -{ - struct cprng_strong *const cprng = kn->kn_hook; - int ret; - - if (hint == NOTE_SUBMIT) - KASSERT(mutex_owned(&cprng->cs_lock)); - else - mutex_enter(&cprng->cs_lock); - if (cprng->cs_ready) { - kn->kn_data = CPRNG_MAX_LEN; /* XXX Too large? */ - ret = 1; - } else { - ret = 0; - } - if (hint == NOTE_SUBMIT) - KASSERT(mutex_owned(&cprng->cs_lock)); - else - mutex_exit(&cprng->cs_lock); + /* Do the sysctl dance. */ + node.sysctl_data = &v; + error = sysctl_lookup(SYSCTLFN_CALL(&node)); - return ret; + /* Clear the buffer before returning the sysctl error. */ + explicit_memset(&v, 0, sizeof v); + return error; } +/* + * sysctl kern.arandom + * + * Independent uniform random bytes, up to 256 bytes. Read-only. + */ static int -filt_cprng_write_event(struct knote *kn, long hint) +sysctl_kern_arandom(SYSCTLFN_ARGS) { - struct cprng_strong *const cprng = kn->kn_hook; - - if (hint == NOTE_SUBMIT) - KASSERT(mutex_owned(&cprng->cs_lock)); - else - mutex_enter(&cprng->cs_lock); - - kn->kn_data = 0; - - if (hint == NOTE_SUBMIT) - KASSERT(mutex_owned(&cprng->cs_lock)); - else - mutex_exit(&cprng->cs_lock); - - return 0; -} - -static const struct filterops cprng_read_filtops = { - .f_isfd = 1, - .f_attach = NULL, - .f_detach = filt_cprng_detach, - .f_event = filt_cprng_read_event, -}; + struct sysctlnode node = *rnode; + uint8_t buf[256]; + int error; -static const struct filterops cprng_write_filtops = { - .f_isfd = 1, - .f_attach = NULL, - .f_detach = filt_cprng_detach, - .f_event = filt_cprng_write_event, -}; + /* + * Clamp to a reasonably small size. 256 bytes is kind of + * arbitrary; 32 would be more reasonable, but we used 256 in + * the past, so let's not break compatibility. 
+ */ + if (*oldlenp > 256) /* size_t, so never negative */ + return E2BIG; -int -cprng_strong_kqfilter(struct cprng_strong *cprng, struct knote *kn) -{ + /* Generate data. */ + cprng_strong(user_cprng, buf, *oldlenp, 0); - switch (kn->kn_filter) { - case EVFILT_READ: - kn->kn_fop = &cprng_read_filtops; - break; - case EVFILT_WRITE: - kn->kn_fop = &cprng_write_filtops; - break; - default: - return EINVAL; - } + /* Do the sysctl dance. */ + node.sysctl_data = buf; + node.sysctl_size = *oldlenp; + error = sysctl_lookup(SYSCTLFN_CALL(&node)); - kn->kn_hook = cprng; - mutex_enter(&cprng->cs_lock); - SLIST_INSERT_HEAD(&cprng->cs_selq.sel_klist, kn, kn_selnext); - mutex_exit(&cprng->cs_lock); - return 0; + /* Clear the buffer before returning the sysctl error. */ + explicit_memset(buf, 0, sizeof buf); + return error; } -int -cprng_strong_poll(struct cprng_strong *cprng, int events) +struct cprng_strong * +cprng_strong_create(const char *name, int ipl, int flags) { - int revents; + struct cprng_strong *cprng; - if (!ISSET(events, (POLLIN | POLLRDNORM))) - return 0; + cprng = kmem_alloc(sizeof(*cprng), KM_SLEEP); + cprng->cs_percpu = percpu_alloc(sizeof(struct cprng_cpu)); + cprng->cs_iplcookie = makeiplcookie(ipl); + percpu_foreach(cprng->cs_percpu, cprng_init_cpu, __UNCONST(name)); - mutex_enter(&cprng->cs_lock); - if (cprng->cs_ready) { - revents = (events & (POLLIN | POLLRDNORM)); - } else { - selrecord(curlwp, &cprng->cs_selq); - revents = 0; - } - mutex_exit(&cprng->cs_lock); - - return revents; -} - -/* - * XXX Move nist_hash_drbg_reseed_advised_p and - * nist_hash_drbg_reseed_needed_p into the nist_hash_drbg API and make - * the NIST_HASH_DRBG structure opaque. - */ -static bool -nist_hash_drbg_reseed_advised_p(NIST_HASH_DRBG *drbg) -{ - - return (drbg->reseed_counter > (NIST_HASH_DRBG_RESEED_INTERVAL / 2)); + return cprng; } -static bool -nist_hash_drbg_reseed_needed_p(NIST_HASH_DRBG *drbg) +void +cprng_strong_destroy(struct cprng_strong *cprng) { - return (drbg->reseed_counter >= NIST_HASH_DRBG_RESEED_INTERVAL); + percpu_foreach(cprng->cs_percpu, cprng_fini_cpu, NULL); + percpu_free(cprng->cs_percpu, sizeof(struct cprng_cpu)); + kmem_free(cprng, sizeof(*cprng)); } -/* - * Generate some data from the underlying generator. - */ static void -cprng_strong_generate(struct cprng_strong *cprng, void *buffer, size_t bytes) +cprng_init_cpu(void *ptr, void *cookie, struct cpu_info *ci) { - const uint32_t cc = cprng_counter(); - - KASSERT(bytes <= CPRNG_MAX_LEN); - KASSERT(mutex_owned(&cprng->cs_lock)); - - /* - * Generate some data from the NIST Hash_DRBG. Caller - * guarantees reseed if we're not ready, and if we exhaust the - * generator, we mark ourselves not ready. Consequently, this - * call to the Hash_DRBG should not fail. - */ - if (__predict_false(nist_hash_drbg_generate(&cprng->cs_drbg, buffer, - bytes, &cc, sizeof(cc)))) - panic("cprng %s: NIST Hash_DRBG failed", cprng->cs_name); + struct cprng_cpu *cc = ptr; + const char *name = cookie; + uint8_t zero[NIST_HASH_DRBG_SEEDLEN_BYTES] = {0}; + char namebuf[64]; /* XXX size? */ /* - * If we've been seeing a lot of use, ask for some fresh - * entropy soon. + * Format the name as, e.g., kern/8 if we're on cpu8. This + * doesn't get displayed anywhere; it just ensures that if + * there were a bug causing us to use the same otherwise secure + * seed on multiple CPUs, we would still get independent output + * from the NIST Hash_DRBG. 
*/ - if (__predict_false(nist_hash_drbg_reseed_advised_p(&cprng->cs_drbg))) - rndsink_schedule(cprng->cs_rndsink); + snprintf(namebuf, sizeof namebuf, "%s/%u", name, cpu_index(ci)); /* - * If we just exhausted the generator, inform the next user - * that we need a reseed. + * Allocate the struct nist_hash_drbg separately, since + * percpu(9) may move objects around in memory without zeroing. */ - if (__predict_false(nist_hash_drbg_reseed_needed_p(&cprng->cs_drbg))) { - cprng->cs_ready = false; - rndsink_schedule(cprng->cs_rndsink); /* paranoia */ - } + cc->cc_drbg = kmem_zalloc(sizeof(*cc->cc_drbg), KM_SLEEP); + if (nist_hash_drbg_instantiate(cc->cc_drbg, zero, sizeof zero, NULL, 0, + namebuf, strlen(namebuf))) + panic("nist_hash_drbg_instantiate"); + cc->cc_epoch = 0; /* uninitialized; cause reseed on first use */ } -/* - * Reseed with whatever we can get from the system entropy pool right now. - */ static void -cprng_strong_reseed(struct cprng_strong *cprng) +cprng_fini_cpu(void *ptr, void *cookie, struct cpu_info *ci) { - uint8_t seed[NIST_HASH_DRBG_MIN_SEEDLEN_BYTES]; - - KASSERT(mutex_owned(&cprng->cs_lock)); + struct cprng_cpu *cc = ptr; - const bool full_entropy = rndsink_request(cprng->cs_rndsink, seed, - sizeof(seed)); - cprng_strong_reseed_from(cprng, seed, sizeof(seed), full_entropy); - explicit_memset(seed, 0, sizeof(seed)); + if (nist_hash_drbg_destroy(cc->cc_drbg)) + panic("nist_hash_drbg_destroy"); + kmem_free(cc->cc_drbg, sizeof(*cc->cc_drbg)); } -/* - * Reseed with the given seed. If we now have full entropy, notify waiters. - */ -static void -cprng_strong_reseed_from(struct cprng_strong *cprng, - const void *seed, size_t bytes, bool full_entropy) +size_t +cprng_strong(struct cprng_strong *cprng, void *buf, size_t len, int flags) { - const uint32_t cc = cprng_counter(); + uint32_t seed[NIST_HASH_DRBG_SEEDLEN_BYTES]; + struct cprng_cpu *cc; + unsigned epoch; + int s; - KASSERT(bytes == NIST_HASH_DRBG_MIN_SEEDLEN_BYTES); - KASSERT(mutex_owned(&cprng->cs_lock)); + if (cpu_intr_p()) + cprng_strong_intr_evcnt.ev_count++; /* - * Notify anyone interested in the partiality of entropy in our - * seed -- anyone waiting for full entropy, or any system - * operators interested in knowing when the entropy pool is - * running on fumes. + * Verify maximum request length. Caller should really limit + * their requests to 32 bytes to avoid spending much time with + * preemption disabled -- use the 32 bytes to seed a private + * DRBG instance if you need more data. */ - if (full_entropy) { - if (!cprng->cs_ready) { - cprng->cs_ready = true; - cv_broadcast(&cprng->cs_cv); - selnotify(&cprng->cs_selq, (POLLIN | POLLRDNORM), - NOTE_SUBMIT); - } - } else { - /* - * XXX Is there is any harm in reseeding with partial - * entropy when we had full entropy before? If so, - * remove the conditional on this message. - */ - if (!cprng->cs_ready && - !ISSET(cprng->cs_flags, CPRNG_REKEY_ANY)) - printf("cprng %s: reseeding with partial entropy\n", - cprng->cs_name); + KASSERT(len <= CPRNG_MAX_LEN); + + /* Verify legacy API use. */ + KASSERT(flags == 0); + + /* Acquire per-CPU state and block interrupts. */ + cc = percpu_getref(cprng->cs_percpu); + s = splraiseipl(cprng->cs_iplcookie); + + /* If the entropy epoch has changed, (re)seed. 
*/ + epoch = entropy_epoch(); + if (__predict_false(epoch != cc->cc_epoch)) { + entropy_extract(seed, sizeof seed, 0); + cprng_strong_reseed_evcnt.ev_count++; + if (nist_hash_drbg_reseed(cc->cc_drbg, seed, sizeof seed, + NULL, 0)) + panic("nist_hash_drbg_reseed"); + explicit_memset(seed, 0, sizeof seed); + cc->cc_epoch = epoch; } - if (nist_hash_drbg_reseed(&cprng->cs_drbg, seed, bytes, &cc, - sizeof(cc))) - /* XXX Fix nist_hash_drbg API so this can't happen. */ - panic("cprng %s: NIST Hash_DRBG reseed failed", - cprng->cs_name); -} - -/* - * Feed entropy from an rndsink request into the CPRNG for which the - * request was issued. - */ -static void -cprng_strong_rndsink_callback(void *context, const void *seed, size_t bytes) -{ - struct cprng_strong *const cprng = context; - - mutex_enter(&cprng->cs_lock); - /* Assume that rndsinks provide only full-entropy output. */ - cprng_strong_reseed_from(cprng, seed, bytes, true); - mutex_exit(&cprng->cs_lock); -} - -static ONCE_DECL(sysctl_prng_once); -static cprng_strong_t *sysctl_prng; - -static int -makeprng(void) -{ - - /* can't create in cprng_init(), too early */ - sysctl_prng = cprng_strong_create("sysctl", IPL_NONE, - CPRNG_INIT_ANY|CPRNG_REKEY_ANY); - return 0; -} - -/* - * sysctl helper routine for kern.urandom node. Picks a random number - * for you. - */ -static int -sysctl_kern_urnd(SYSCTLFN_ARGS) -{ - int v, rv; - - RUN_ONCE(&sysctl_prng_once, makeprng); - rv = cprng_strong(sysctl_prng, &v, sizeof(v), 0); - if (rv == sizeof(v)) { - struct sysctlnode node = *rnode; - node.sysctl_data = &v; - return (sysctl_lookup(SYSCTLFN_CALL(&node))); + /* Generate data. Failure here means it's time to reseed. */ + if (nist_hash_drbg_generate(cc->cc_drbg, buf, len, NULL, 0)) { + entropy_extract(seed, sizeof seed, 0); + cprng_strong_reseed_evcnt.ev_count++; + if (nist_hash_drbg_reseed(cc->cc_drbg, seed, sizeof seed, + NULL, 0)) + panic("nist_hash_drbg_reseed"); + explicit_memset(seed, 0, sizeof seed); + if (nist_hash_drbg_generate(cc->cc_drbg, buf, len, NULL, 0)) + panic("nist_hash_drbg_generate"); } - else - return (EIO); /*XXX*/ -} -/* - * sysctl helper routine for kern.arandom node. Fills the supplied - * structure with random data for you. - * - * This node was originally declared as type "int" but its implementation - * in OpenBSD, whence it came, would happily return up to 8K of data if - * requested. Evidently this was used to key RC4 in userspace. - * - * In NetBSD, the libc stack-smash-protection code reads 64 bytes - * from here at every program startup. Third-party software also often - * uses this to obtain a key for CSPRNG, reading 32 bytes or more, while - * avoiding the need to open /dev/urandom. - */ -static int -sysctl_kern_arnd(SYSCTLFN_ARGS) -{ - int error; - void *v; - struct sysctlnode node = *rnode; - size_t n __diagused; - - switch (*oldlenp) { - case 0: - return 0; - default: - if (*oldlenp > 256) { - return E2BIG; - } - RUN_ONCE(&sysctl_prng_once, makeprng); - v = kmem_alloc(*oldlenp, KM_SLEEP); - n = cprng_strong(sysctl_prng, v, *oldlenp, 0); - KASSERT(n == *oldlenp); - node.sysctl_data = v; - node.sysctl_size = *oldlenp; - error = sysctl_lookup(SYSCTLFN_CALL(&node)); - kmem_free(v, *oldlenp); - return error; - } + /* Release state and interrupts. */ + splx(s); + percpu_putref(cprng->cs_percpu); + + /* Return the number of bytes generated, for hysterical raisins. 
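The comment in cprng_strong above asks callers to keep requests to 32 bytes and expand locally. A rough sketch of that pattern, reusing the nist_hash_drbg_* calls with the signatures they have elsewhere in this patch; the helper name and personalization string are illustrative, the relevant cprng and Hash_DRBG headers are assumed to be in scope, and len is assumed to be at most CPRNG_MAX_LEN:

    /*
     * Illustrative: draw a 32-byte key from kern_cprng and expand it
     * with a private Hash_DRBG instance, instead of making one large
     * cprng_strong request with preemption disabled.
     */
    static void
    fill_bulk_random(void *buf, size_t len)
    {
        struct nist_hash_drbg drbg;
        uint8_t key[32];

        cprng_strong(kern_cprng, key, sizeof key, 0);
        if (nist_hash_drbg_instantiate(&drbg, key, sizeof key, NULL, 0,
                "bulkfill", strlen("bulkfill")))
            panic("nist_hash_drbg_instantiate");
        explicit_memset(key, 0, sizeof key);
        if (nist_hash_drbg_generate(&drbg, buf, len, NULL, 0))
            panic("nist_hash_drbg_generate");
        if (nist_hash_drbg_destroy(&drbg))
            panic("nist_hash_drbg_destroy");
    }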
*/ + return len; } diff --git a/sys/lib/libkern/Makefile.libkern b/sys/lib/libkern/Makefile.libkern index a0620d7d438b..bc6ca6b90646 100644 --- a/sys/lib/libkern/Makefile.libkern +++ b/sys/lib/libkern/Makefile.libkern @@ -35,6 +35,7 @@ CPPFLAGS+= -I$M ${KERNCPPFLAGS} ${KERNMISCCPPFLAGS} .include "${.PARSEDIR}/../../../common/lib/libppath/Makefile.inc" CPPFLAGS+= -I${KERNDIR}/../../../common/include +CPPFLAGS+= -I${KERNDIR}/../../../common/libc/hash/sha3 .PATH.c: ${KERNDIR} .if exists ($M/Makefile.inc) @@ -94,6 +95,8 @@ SRCS+= hexdump.c # for crypto SRCS+= explicit_memset.c consttime_memequal.c +SRCS+= entpool.c + .PATH: ${NETBSDSRCDIR}/common/lib/libc/cdb SRCS+= cdbr.c SRCS+= mi_vector_hash.c diff --git a/sys/lib/libkern/entpool.c b/sys/lib/libkern/entpool.c new file mode 100644 index 000000000000..da610a0e29b9 --- /dev/null +++ b/sys/lib/libkern/entpool.c @@ -0,0 +1,750 @@ +/* $NetBSD$ */ + +/*- + * Copyright (c) 2019 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Taylor R. Campbell. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Entropy pool (`reseedable pseudorandom number generator') based on a + * sponge duplex, following the design described and analyzed in + * + * Guido Bertoni, Joan Daemen, Michaël Peeters, and Gilles Van + * Assche, `Sponge-Based Pseudo-Random Number Generators', in + * Stefan Mangard and François-Xavier Standaert, eds., + * Cryptographic Hardware and Embedded Systems—CHES 2010, Springer + * LNCS 6225, pp. 33–47. + * https://link.springer.com/chapter/10.1007/978-3-642-15031-9_3 + * https://keccak.team/files/SpongePRNG.pdf + * + * Guido Bertoni, Joan Daemen, Michaël Peeters, and Gilles Van + * Assche, `Duplexing the Sponge: Single-Pass Authenticated + * Encryption and Other Applications', in Ali Miri and Serge + * Vaudenay, eds., Selected Areas in Cryptography—SAC 2011, + * Springer LNCS 7118, pp. 320–337. + * https://link.springer.com/chapter/10.1007/978-3-642-28496-0_19 + * https://keccak.team/files/SpongeDuplex.pdf + * + * We make the following tweaks that don't affect security: + * + * - Samples are length-delimited 7-bit variable-length encoding. 
+ * The encoding is still injective, so the security theorems + * continue to apply. + * + * - Output is not buffered -- callers should draw 32 bytes and + * expand with a stream cipher. In effect, every output draws + * the full rate, and we just discard whatever the caller didn't + * ask for; the impact is only on performance, not security. + * + * On top of the underlying sponge state, an entropy pool maintains an + * integer i in [0, RATE-1] indicating where to write the next byte in + * the input buffer. Zeroing an entropy pool initializes it. + */ + +#if defined(_KERNEL) || defined(_STANDALONE) +#include +__KERNEL_RCSID(0, "$NetBSD$"); +#endif + +#include "entpool.h" +#include ENTPOOL_HEADER + +#if defined(_KERNEL) || defined(_STANDALONE) +#include +#include +#define ASSERT KASSERT +#else +#include +#include +#include +#include +#include +#define ASSERT assert +#define CTASSERT __CTASSERT +#endif + +#define secret /* must not use in variable-time operations; should zero */ +#define arraycount(A) (sizeof(A)/sizeof((A)[0])) +#define MIN(X,Y) ((X) < (Y) ? (X) : (Y)) + +#define RATE ENTPOOL_RATE + +/* + * stir(P) + * + * Internal subroutine to apply the sponge permutation to the + * state in P. Resets P->i to 0 to indicate that the input buffer + * is empty. + */ +static void +stir(struct entpool *P) +{ + size_t i; + + /* + * Switch to the permutation's byte order, if necessary, apply + * permutation, and then switch back. This way we can data in + * and out byte by byte, but get the same answers out of test + * vectors. + */ + for (i = 0; i < arraycount(P->s.w); i++) + P->s.w[i] = ENTPOOL_WTOH(P->s.w[i]); + ENTPOOL_PERMUTE(P->s.w); + for (i = 0; i < arraycount(P->s.w); i++) + P->s.w[i] = ENTPOOL_HTOW(P->s.w[i]); + + /* Reset the input buffer. */ + P->i = 0; +} + +/* + * entpool_enter(P, buf, len) + * + * Enter len bytes from buf into the entropy pool P, stirring as + * needed. + */ +void +entpool_enter(struct entpool *P, const void *buf, size_t len) +{ + const uint8_t *p = buf; + size_t n = len, n1 = n; + + /* Sanity-check P->i. */ + ASSERT(P->i <= RATE-1); + + /* Encode the length, stirring as needed. */ + while (n1) { + if (P->i == RATE-1) + stir(P); + ASSERT(P->i < RATE-1); + P->s.u8[P->i++] ^= (n1 >= 0x80 ? 0x80 : 0) | (n1 & 0x7f); + n1 >>= 7; + } + + /* Enter the sample, stirring as needed. */ + while (n --> 0) { + if (P->i == RATE-1) + stir(P); + ASSERT(P->i < RATE-1); + P->s.u8[P->i++] ^= *p++; + } + + /* If we filled the input buffer exactly, stir once more. */ + if (P->i == RATE-1) + stir(P); + ASSERT(P->i < RATE-1); +} + +/* + * entpool_enter_nostir(P, buf, len) + * + * Enter as many bytes as possible, up to len, from buf into the + * entropy pool P. + * + * Return true if the sample was consumed in its entirety, or true + * if the sample was truncated so the caller should arrange to + * call entpool_stir when it is next convenient to do so. + * + * This function is cheap -- it only xors the input into the + * state, and never calls the underlying permutation, but it may + * truncate samples. + */ +bool +entpool_enter_nostir(struct entpool *P, const void *buf, size_t len) +{ + const uint8_t *p = buf; + size_t n0, n; + + /* Sanity-check P->i. */ + ASSERT(P->i <= RATE-1); + + /* If the input buffer is full, fail. */ + if (P->i == RATE-1) + return false; + ASSERT(P->i < RATE-1); + + /* + * Truncate the sample and enter it with 1-byte length encoding + * -- don't bother with variable-length encoding, not worth the + * trouble. 
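For reference, the length prefix entered ahead of each sample is a little-endian base-128 encoding with a continuation bit. Pulled out as a standalone helper (illustrative only, not part of the patch), the encoder is:

    #include <stddef.h>
    #include <stdint.h>

    /*
     * Encode len the way entpool_enter XORs it into the pool before a
     * sample: low 7 bits first, 0x80 set on each byte that has more
     * length bits following it.  A zero-length sample gets no prefix.
     */
    static size_t
    encode_len(uint8_t *out, size_t len)
    {
        size_t i = 0;

        while (len) {
            out[i++] = (len >= 0x80 ? 0x80 : 0) | (len & 0x7f);
            len >>= 7;
        }
        return i;   /* number of prefix bytes written */
    }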
+ */ + n = n0 = MIN(127, MIN(len, RATE-1 - P->i - 1)); + P->s.u8[P->i++] ^= n; + while (n --> 0) + P->s.u8[P->i++] ^= *p++; + + /* Can't guarantee anything better than 0 <= i <= RATE-1. */ + ASSERT(P->i <= RATE-1); + + /* Return true if all done, false if truncated and in need of stir. */ + return (n0 == len); +} + +/* + * entpool_stir(P) + * + * Stir the entropy pool after entpool_enter_nostir fails. If it + * has already been stirred already, this has no effect. + */ +void +entpool_stir(struct entpool *P) +{ + + /* Sanity-check P->i. */ + ASSERT(P->i <= RATE-1); + + /* If the input buffer is full, stir. */ + if (P->i == RATE-1) + stir(P); + ASSERT(P->i < RATE-1); +} + +/* + * entpool_extract(P, buf, len) + * + * Extract len bytes from the entropy pool P into buf. + */ +void +entpool_extract(struct entpool *P, secret void *buf, size_t len) +{ + uint8_t *p = buf; + size_t n = len; + + /* Sanity-check P->i. */ + ASSERT(P->i <= RATE-1); + + /* If input buffer is not empty, stir. */ + if (P->i != 0) + stir(P); + ASSERT(P->i == 0); + + /* + * Copy out and zero (RATE-1)-sized chunks at a time, stirring + * with a bit set to distinguish this from inputs. + */ + while (n >= RATE-1) { + memcpy(p, P->s.u8, RATE-1); + memset(P->s.u8, 0, RATE-1); + P->s.u8[RATE-1] ^= 0x80; + stir(P); + p += RATE-1; + n -= RATE-1; + } + + /* + * If there's anything left, copy out a partial rate's worth + * and zero the entire rate's worth, stirring with a bit set to + * distinguish this from inputs. + */ + if (n) { + ASSERT(n < RATE-1); + memcpy(p, P->s.u8, n); /* Copy part of it. */ + memset(P->s.u8, 0, RATE-1); /* Zero all of it. */ + P->s.u8[RATE-1] ^= 0x80; + stir(P); + } +} + +/* + * Known-answer tests + */ + +#if ENTPOOL_SMALL + +#define KATLEN 15 + +/* Gimli */ +static const uint8_t known_answers[][KATLEN] = { + [0] = { + 0x69,0xb8,0x49,0x0d,0x39,0xfb,0x42,0x61, + 0xf7,0x66,0xdf,0x04,0xb6,0xed,0x11, + }, + [1] = { + 0x74,0x15,0x16,0x49,0x31,0x07,0x77,0xa1, + 0x3b,0x4d,0x78,0xc6,0x5d,0xef,0x87, + }, + [2] = { + 0xae,0xfd,0x7d,0xc4,0x3b,0xce,0x09,0x25, + 0xbf,0x60,0x21,0x6e,0x3c,0x3a,0x84, + }, + [3] = { + 0xae,0xfd,0x7d,0xc4,0x3b,0xce,0x09,0x25, + 0xbf,0x60,0x21,0x6e,0x3c,0x3a,0x84, + }, + [4] = { + 0x69,0xb8,0x49,0x0d,0x39,0xfb,0x42,0x61, + 0xf7,0x66,0xdf,0x04,0xb6,0xed,0x11, + }, + [5] = { + 0xa9,0x3c,0x3c,0xac,0x5f,0x6d,0x80,0xdc, + 0x33,0x0c,0xb2,0xe3,0xdd,0x55,0x31, + }, + [6] = { + 0x2e,0x69,0x1a,0x2a,0x2d,0x09,0xd4,0x5e, + 0x49,0xcc,0x8c,0xb2,0x0b,0xcc,0x42, + }, + [7] = { + 0xae,0xfd,0x7d,0xc4,0x3b,0xce,0x09,0x25, + 0xbf,0x60,0x21,0x6e,0x3c,0x3a,0x84, + }, + [8] = { + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00, + }, + [9] = { + 0x69,0xb8,0x49,0x0d,0x39,0xfb,0x42,0x61, + 0xf7,0x66,0xdf,0x04,0xb6,0xed,0x11, + }, + [10] = { + 0x2e,0x69,0x1a,0x2a,0x2d,0x09,0xd4,0x5e, + 0x49,0xcc,0x8c,0xb2,0x0b,0xcc,0x42, + }, + [11] = { + 0x6f,0xfd,0xd2,0x29,0x78,0x46,0xc0,0x7d, + 0xc7,0xf2,0x0a,0x2b,0x72,0xd6,0xc6, + }, + [12] = { + 0x86,0xf0,0xc1,0xf9,0x95,0x0f,0xc9,0x12, + 0xde,0x38,0x39,0x10,0x1f,0x8c,0xc4, + }, +}; + +#else /* !ENTPOOL_SMALL */ + +#define KATLEN 16 + +/* Keccak-p[1600, 24] */ +static const uint8_t known_answers[][KATLEN] = { + [0] = { + 0x3b,0x20,0xf0,0xe9,0xce,0x94,0x48,0x07, + 0x97,0xb6,0x16,0xb5,0xb5,0x05,0x1a,0xce, + }, + [1] = { + 0x57,0x49,0x6e,0x28,0x7f,0xaa,0xee,0x6c, + 0xa8,0xb0,0xf5,0x0b,0x87,0xae,0xd6,0xd6, + }, + [2] = { + 0x51,0x72,0x0f,0x59,0x54,0xe1,0xaf,0xa8, + 0x16,0x67,0xfa,0x3f,0x8a,0x19,0x52,0x50, + }, + [3] = { + 
0x51,0x72,0x0f,0x59,0x54,0xe1,0xaf,0xa8, + 0x16,0x67,0xfa,0x3f,0x8a,0x19,0x52,0x50, + }, + [4] = { + 0x3b,0x20,0xf0,0xe9,0xce,0x94,0x48,0x07, + 0x97,0xb6,0x16,0xb5,0xb5,0x05,0x1a,0xce, + }, + [5] = { + 0x95,0x23,0x77,0xe4,0x84,0xeb,0xaa,0x2e, + 0x6a,0x99,0xc2,0x52,0x06,0x6d,0xdf,0xea, + }, + [6] = { + 0x8c,0xdd,0x1b,0xaf,0x0e,0xf6,0xe9,0x1d, + 0x51,0x33,0x68,0x38,0x8d,0xad,0x55,0x84, + }, + [7] = { + 0x51,0x72,0x0f,0x59,0x54,0xe1,0xaf,0xa8, + 0x16,0x67,0xfa,0x3f,0x8a,0x19,0x52,0x50, + }, + [8] = { + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + }, + [9] = { + 0x3b,0x20,0xf0,0xe9,0xce,0x94,0x48,0x07, + 0x97,0xb6,0x16,0xb5,0xb5,0x05,0x1a,0xce, + }, + [10] = { + 0x8c,0xdd,0x1b,0xaf,0x0e,0xf6,0xe9,0x1d, + 0x51,0x33,0x68,0x38,0x8d,0xad,0x55,0x84, + }, + [11] = { + 0xf6,0xc1,0x14,0xbb,0x13,0x0a,0xaf,0xed, + 0xca,0x0b,0x35,0x2c,0xf1,0x2b,0x1a,0x85, + }, + [12] = { + 0xf9,0x4b,0x05,0xd1,0x8b,0xcd,0xb3,0xd0, + 0x77,0x27,0xfe,0x46,0xf9,0x33,0xb2,0xa2, + }, +}; + +#endif + +#define KAT_BEGIN(P, n) memset(P, 0, sizeof(*(P))) +#define KAT_ERROR() return -1 +#define KAT_END(P, n) do \ +{ \ + uint8_t KAT_ACTUAL[KATLEN]; \ + entpool_extract(P, KAT_ACTUAL, KATLEN); \ + if (memcmp(KAT_ACTUAL, known_answers[n], KATLEN)) \ + return -1; \ +} while (0) + +int +entpool_selftest(void) +{ + struct entpool pool, *P = &pool; + uint8_t sample[1] = {0xff}; + uint8_t scratch[RATE]; + const uint8_t zero[RATE] = {0}; + + /* Test entpool_enter with empty buffer. */ + KAT_BEGIN(P, 0); + entpool_stir(P); /* noop */ + entpool_enter(P, sample, 1); + entpool_stir(P); /* noop */ + KAT_END(P, 0); + + /* Test entpool_enter with partial buffer. */ + KAT_BEGIN(P, 1); + entpool_stir(P); /* noop */ +#if ENTPOOL_SMALL + entpool_enter(P, zero, RATE-3); +#else + entpool_enter(P, zero, RATE-4); +#endif + entpool_stir(P); /* noop */ + entpool_enter(P, sample, 1); + entpool_stir(P); /* noop */ + KAT_END(P, 1); + + /* Test entpool_enter with full buffer. */ + KAT_BEGIN(P, 2); + entpool_stir(P); /* noop */ +#if ENTPOOL_SMALL + if (!entpool_enter_nostir(P, zero, RATE-2)) + KAT_ERROR(); +#else + if (!entpool_enter_nostir(P, zero, 127)) + KAT_ERROR(); + if (!entpool_enter_nostir(P, zero, RATE-2 - 127 - 1)) + KAT_ERROR(); +#endif + entpool_enter(P, sample, 1); + entpool_stir(P); /* noop */ + KAT_END(P, 2); + + /* Test entpool_enter with full buffer after stir. */ + KAT_BEGIN(P, 3); + entpool_stir(P); /* noop */ +#if ENTPOOL_SMALL + if (!entpool_enter_nostir(P, zero, RATE-2)) + KAT_ERROR(); +#else + CTASSERT(127 <= RATE-2); + if (!entpool_enter_nostir(P, zero, 127)) + KAT_ERROR(); + if (!entpool_enter_nostir(P, zero, RATE-2 - 127 - 1)) + KAT_ERROR(); +#endif + entpool_stir(P); + entpool_enter(P, sample, 1); + entpool_stir(P); /* noop */ + KAT_END(P, 3); + + /* Test entpool_enter_nostir with empty buffer. */ + KAT_BEGIN(P, 4); + entpool_stir(P); /* noop */ + if (!entpool_enter_nostir(P, sample, 1)) + KAT_ERROR(); + entpool_stir(P); /* noop */ + KAT_END(P, 4); + + /* Test entpool_enter_nostir with partial buffer. */ + KAT_BEGIN(P, 5); + entpool_stir(P); /* noop */ +#if ENTPOOL_SMALL + entpool_enter(P, zero, RATE-3); +#else + entpool_enter(P, zero, RATE-4); +#endif + entpool_stir(P); /* noop */ + if (entpool_enter_nostir(P, sample, 1)) + KAT_ERROR(); + entpool_stir(P); + KAT_END(P, 5); + + /* Test entpool_enter_nostir with full buffer. 
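These known-answer tests exercise the same calls an ordinary consumer makes. End to end, minimal use of the pool looks roughly like this (sample data and sizes are illustrative):

    #include <string.h>

    #include "entpool.h"

    /*
     * Zeroing a struct entpool initializes it.  Enter samples with
     * entpool_enter, or entpool_enter_nostir on hot paths (stirring
     * later if it reports truncation), then extract a short key and
     * expand it elsewhere with a stream cipher.
     */
    static void
    entpool_example(void)
    {
        struct entpool P;
        uint8_t sample[8] = { 0 };      /* e.g. timestamps, device data */
        uint8_t key[32];

        memset(&P, 0, sizeof P);
        entpool_enter(&P, sample, sizeof sample);
        if (!entpool_enter_nostir(&P, sample, sizeof sample))
            entpool_stir(&P);           /* input buffer was full */
        entpool_extract(&P, key, sizeof key);
    }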
*/ + KAT_BEGIN(P, 6); + entpool_stir(P); /* noop */ +#if ENTPOOL_SMALL + if (!entpool_enter_nostir(P, zero, RATE-2)) + KAT_ERROR(); +#else + CTASSERT(127 <= RATE-2); + if (!entpool_enter_nostir(P, zero, 127)) + KAT_ERROR(); + if (!entpool_enter_nostir(P, zero, RATE-2 - 127 - 1)) + KAT_ERROR(); +#endif + if (entpool_enter_nostir(P, sample, 1)) + KAT_ERROR(); + entpool_stir(P); + KAT_END(P, 6); + + /* Test entpool_enter_nostir with full buffer after stir. */ + KAT_BEGIN(P, 7); + entpool_stir(P); /* noop */ +#if ENTPOOL_SMALL + if (!entpool_enter_nostir(P, zero, RATE-2)) + KAT_ERROR(); +#else + CTASSERT(127 <= RATE-2); + if (!entpool_enter_nostir(P, zero, 127)) + KAT_ERROR(); + if (!entpool_enter_nostir(P, zero, RATE-2 - 127 - 1)) + KAT_ERROR(); +#endif + entpool_stir(P); + if (!entpool_enter_nostir(P, sample, 1)) + KAT_ERROR(); + entpool_stir(P); /* noop */ + KAT_END(P, 7); + + /* Test entpool_extract with empty input buffer. */ + KAT_BEGIN(P, 8); + entpool_stir(P); /* noop */ + KAT_END(P, 8); + + /* Test entpool_extract with nonempty input buffer. */ + KAT_BEGIN(P, 9); + entpool_stir(P); /* noop */ + entpool_enter(P, sample, 1); + entpool_stir(P); /* noop */ + KAT_END(P, 9); + + /* Test entpool_extract with full input buffer. */ + KAT_BEGIN(P, 10); + entpool_stir(P); /* noop */ +#if ENTPOOL_SMALL + if (!entpool_enter_nostir(P, zero, RATE-2)) + KAT_ERROR(); +#else + CTASSERT(127 <= RATE-2); + if (!entpool_enter_nostir(P, zero, 127)) + KAT_ERROR(); + if (!entpool_enter_nostir(P, zero, RATE-2 - 127 - 1)) + KAT_ERROR(); +#endif + KAT_END(P, 10); + + /* Test entpool_extract with iterated output. */ + KAT_BEGIN(P, 11); + entpool_stir(P); /* noop */ + entpool_extract(P, scratch, RATE-1 + 1); + entpool_stir(P); /* noop */ + KAT_END(P, 11); + + /* Test extract, enter, extract. */ + KAT_BEGIN(P, 12); + entpool_stir(P); /* noop */ + entpool_extract(P, scratch, 1); + entpool_stir(P); /* noop */ + entpool_enter(P, sample, 1); + entpool_stir(P); /* noop */ + KAT_END(P, 12); + + return 0; +} + +#if ENTPOOL_TEST +int +main(void) +{ + return entpool_selftest(); +} +#endif + +/* + * Known-answer test generation + * + * This generates the known-answer test vectors from explicitly + * specified duplex inputs that correspond to what entpool_enter + * &c. induce, to confirm the encoding of inputs works as + * intended. + */ + +#if ENTPOOL_GENKAT + +#include + +struct event { + enum { IN, OUT, STOP } t; + uint8_t b[RATE-1]; +}; + +/* Cases correspond to entpool_selftest above. 
*/ +static const struct event *const cases[] = { + [0] = (const struct event[]) { + {IN, {1, 0xff}}, + {STOP, {0}}, + }, + [1] = (const struct event[]) { +#if ENTPOOL_SMALL + {IN, {RATE-3, [RATE-2] = 1}}, +#else + {IN, {0x80|((RATE-4)&0x7f), (RATE-4)>>7, [RATE-2] = 1}}, +#endif + {IN, {0xff}}, + {STOP, {0}}, + }, + [2] = (const struct event[]) { +#if ENTPOOL_SMALL + {IN, {RATE-2}}, +#else + {IN, {127, [128] = RATE-2 - 127 - 1}}, +#endif + {IN, {1, 0xff}}, + {STOP, {0}}, + }, + [3] = (const struct event[]) { +#if ENTPOOL_SMALL + {IN, {RATE-2}}, +#else + {IN, {127, [128] = RATE-2 - 127 - 1}}, +#endif + {IN, {1, 0xff}}, + {STOP, {0}}, + }, + [4] = (const struct event[]) { + {IN, {1, 0xff}}, + {STOP, {0}}, + }, + + [5] = (const struct event[]) { +#if ENTPOOL_SMALL + {IN, {RATE-3, [RATE-2] = 0 /* truncated length */}}, +#else + {IN, {0x80|((RATE-4)&0x7f), (RATE-4)>>7, + [RATE-2] = 0 /* truncated length */}}, +#endif + {STOP, {0}}, + }, + [6] = (const struct event[]) { +#if ENTPOOL_SMALL + {IN, {RATE-2}}, +#else + {IN, {127, [128] = RATE-2 - 127 - 1}}, +#endif + {STOP, {0}}, + }, + [7] = (const struct event[]) { +#if ENTPOOL_SMALL + {IN, {RATE-2}}, +#else + {IN, {127, [128] = RATE-2 - 127 - 1}}, +#endif + {IN, {1, 0xff}}, + {STOP, {0}}, + }, + [8] = (const struct event[]) { + {STOP, {0}}, + }, + [9] = (const struct event[]) { + {IN, {1, 0xff}}, + {STOP, {0}}, + }, + [10] = (const struct event[]) { +#if ENTPOOL_SMALL + {IN, {RATE-2}}, +#else + {IN, {127, [128] = RATE-2 - 127 - 1}}, +#endif + {STOP, {0}}, + }, + [11] = (const struct event[]) { + {OUT, {0}}, + {OUT, {0}}, + {STOP, {0}}, + }, + [12] = (const struct event[]) { + {OUT, {0}}, + {IN, {1, 0xff}}, + {STOP, {0}}, + }, +}; + +static void +compute(uint8_t output[KATLEN], const struct event *events) +{ + union { + uint8_t b[ENTPOOL_SIZE]; + ENTPOOL_WORD w[ENTPOOL_SIZE/sizeof(ENTPOOL_WORD)]; + } u; + unsigned i, j, k; + + memset(&u.b, 0, sizeof u.b); + for (i = 0;; i++) { + if (events[i].t == STOP) + break; + for (j = 0; j < sizeof(events[i].b); j++) + u.b[j] ^= events[i].b[j]; + if (events[i].t == OUT) { + memset(u.b, 0, RATE-1); + u.b[RATE-1] ^= 0x80; + } + + for (k = 0; k < arraycount(u.w); k++) + u.w[k] = ENTPOOL_WTOH(u.w[k]); + ENTPOOL_PERMUTE(u.w); + for (k = 0; k < arraycount(u.w); k++) + u.w[k] = ENTPOOL_HTOW(u.w[k]); + } + + for (j = 0; j < KATLEN; j++) + output[j] = u.b[j]; +} + +int +main(void) +{ + uint8_t output[KATLEN]; + unsigned i, j; + + printf("static const uint8_t known_answers[][KATLEN] = {\n"); + for (i = 0; i < arraycount(cases); i++) { + printf("\t[%u] = {\n", i); + compute(output, cases[i]); + for (j = 0; j < KATLEN; j++) { + if (j % 8 == 0) + printf("\t\t"); + printf("0x%02hhx,", output[j]); + if (j % 8 == 7) + printf("\n"); + } + if ((KATLEN % 8) != 0) + printf("\n"); + printf("\t},\n"); + } + printf("};\n"); + + fflush(stdout); + return ferror(stdout); +} + +#endif diff --git a/sys/dev/rnd_private.h b/sys/lib/libkern/entpool.h similarity index 54% rename from sys/dev/rnd_private.h rename to sys/lib/libkern/entpool.h index 7ff58dce7c4b..2ec9f59af979 100644 --- a/sys/dev/rnd_private.h +++ b/sys/lib/libkern/entpool.h @@ -1,12 +1,11 @@ -/* $NetBSD: rnd_private.h,v 1.11 2015/04/14 13:14:20 riastradh Exp $ */ +/* $NetBSD$ */ /*- - * Copyright (c) 1997 The NetBSD Foundation, Inc. + * Copyright (c) 2019 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation - * by Michael Graff . 
This code uses ideas and - * algorithms from the Linux driver written by Ted Ts'o. + * by Taylor R. Campbell. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -30,41 +29,51 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#ifndef _DEV_RNDPRIVATE_H -#define _DEV_RNDPRIVATE_H +#ifndef ENTPOOL_H +#define ENTPOOL_H #include -#include -#include -#include -#include -#include +#include -/* - * Number of bytes returned per hash. This value is used in both - * rnd.c and rndpool.c to decide when enough entropy exists to do a - * hash to extract it. - */ -#define RND_ENTROPY_THRESHOLD 10 +#if defined(_KERNEL) || defined(_STANDALONE) +#include +#else +#include +#endif -bool rnd_extract(void *, size_t); -bool rnd_tryextract(void *, size_t); -void rnd_getmore(size_t); +#if ENTPOOL_SMALL +#define ENTPOOL_HEADER +#define ENTPOOL_PERMUTE gimli +#define ENTPOOL_SIZE 48 +#define ENTPOOL_WORD uint32_t +#define ENTPOOL_WTOH le32toh +#define ENTPOOL_HTOW htole32 +#define ENTPOOL_SECURITY 16 +#else +#define ENTPOOL_HEADER +#define ENTPOOL_PERMUTE keccakf1600 +#define ENTPOOL_SIZE 200 +#define ENTPOOL_WORD uint64_t +#define ENTPOOL_WTOH le64toh +#define ENTPOOL_HTOW htole64 +#define ENTPOOL_SECURITY 16 +#endif -/* - * Flag indicating rnd_init has run. - */ -extern int rnd_ready; +#define ENTPOOL_CAPACITY (2*ENTPOOL_SECURITY) +#define ENTPOOL_RATE (ENTPOOL_SIZE - ENTPOOL_CAPACITY) -/* - * Debugging flags. - */ -#ifdef RND_DEBUG -extern int rnd_debug; -#define RND_DEBUG_WRITE 0x0001 -#define RND_DEBUG_READ 0x0002 -#define RND_DEBUG_IOCTL 0x0004 -#define RND_DEBUG_SNOOZE 0x0008 -#endif +struct entpool { + union { + uint8_t u8[ENTPOOL_SIZE]; + ENTPOOL_WORD w[ENTPOOL_SIZE/sizeof(ENTPOOL_WORD)]; + } s; + unsigned i; +}; + +int entpool_selftest(void); +void entpool_enter(struct entpool *, const void *, size_t); +bool entpool_enter_nostir(struct entpool *, const void *, size_t); +void entpool_stir(struct entpool *); +void entpool_extract(struct entpool *, void *, size_t); -#endif /* _DEV_RNDPRIVATE_H */ +#endif /* ENTPOOL_H */ diff --git a/sys/rump/dev/lib/librnd/Makefile b/sys/rump/dev/lib/librnd/Makefile index e517b37b1eca..9baa8daaf7b8 100644 --- a/sys/rump/dev/lib/librnd/Makefile +++ b/sys/rump/dev/lib/librnd/Makefile @@ -7,7 +7,7 @@ LIB= rumpdev_rnd COMMENT=/dev/{,u}random IOCONF= RND.ioconf -SRCS= rndpseudo.c +SRCS= random.c SRCS+= rnd_component.c diff --git a/sys/rump/librump/rumpkern/Makefile.rumpkern b/sys/rump/librump/rumpkern/Makefile.rumpkern index eb8cd6d8d3ca..249d0c6e09fd 100644 --- a/sys/rump/librump/rumpkern/Makefile.rumpkern +++ b/sys/rump/librump/rumpkern/Makefile.rumpkern @@ -70,6 +70,7 @@ SRCS+= init_sysctl_base.c \ kern_cfglock.c \ kern_clock.c \ kern_descrip.c \ + kern_entropy.c \ kern_event.c \ kern_hook.c \ kern_ksyms.c \ @@ -82,9 +83,6 @@ SRCS+= init_sysctl_base.c \ kern_rate.c \ kern_reboot.c \ kern_resource.c \ - kern_rndpool.c \ - kern_rndq.c \ - kern_rndsink.c \ kern_rwlock_obj.c \ kern_scdebug.c \ kern_stub.c \ diff --git a/sys/rump/librump/rumpkern/emul.c b/sys/rump/librump/rumpkern/emul.c index 541155a435f5..3f0de83c2488 100644 --- a/sys/rump/librump/rumpkern/emul.c +++ b/sys/rump/librump/rumpkern/emul.c @@ -130,8 +130,6 @@ struct emul emul_netbsd = { .e_sc_autoload = netbsd_syscalls_autoload, }; -cprng_strong_t *kern_cprng; - /* not used, but need the symbols for pointer comparisons */ syncobj_t mutex_syncobj, rw_syncobj; diff --git a/sys/rump/librump/rumpkern/rump.c b/sys/rump/librump/rumpkern/rump.c index 
1e62968a9267..6a28abfaeed3 100644 --- a/sys/rump/librump/rumpkern/rump.c +++ b/sys/rump/librump/rumpkern/rump.c @@ -326,10 +326,6 @@ rump_init(void) #endif /* RUMP_USE_CTOR */ rnd_init(); - cprng_init(); - kern_cprng = cprng_strong_create("kernel", IPL_VM, - CPRNG_INIT_ANY|CPRNG_REKEY_ANY); - rump_hyperentropy_init(); procinit(); @@ -397,6 +393,7 @@ rump_init(void) ncpuonline = ncpu; /* Once all CPUs are detected, initialize the per-CPU cprng_fast. */ + cprng_init(); cprng_fast_init(); mp_online = true; diff --git a/sys/sys/compat_stub.h b/sys/sys/compat_stub.h index 0cc0bfeef941..77843f2854c3 100644 --- a/sys/sys/compat_stub.h +++ b/sys/sys/compat_stub.h @@ -308,8 +308,8 @@ MODULE_HOOK(get_emul_sunos_hook, int, (const struct emul **)); /* * Hooks for rnd_ioctl_50 */ -MODULE_HOOK(rnd_ioctl_50_hook, int, (struct file *, u_long, void *)); -MODULE_HOOK(rnd_ioctl32_50_hook, int, (struct file *, u_long, void *)); +MODULE_HOOK(rnd_ioctl_50_hook, int, (u_long, void *)); +MODULE_HOOK(rnd_ioctl32_50_hook, int, (u_long, void *)); /* * Hooks for compat_60 ttioctl and ptmioctl diff --git a/sys/sys/cprng.h b/sys/sys/cprng.h index bc4f9312b22e..b6521028305e 100644 --- a/sys/sys/cprng.h +++ b/sys/sys/cprng.h @@ -41,10 +41,7 @@ #include #include -/* - * NIST SP800-90 says 2^19 bytes per request for the Hash_DRBG. - */ -#define CPRNG_MAX_LEN 524288 +#define CPRNG_MAX_LEN NIST_HASH_DRBG_MAX_REQUEST_BYTES typedef struct cprng_strong cprng_strong_t; @@ -65,11 +62,8 @@ cprng_strong_t * void cprng_strong_destroy(cprng_strong_t *); size_t cprng_strong(cprng_strong_t *, void *, size_t, int); -struct knote; /* XXX temp, for /dev/random */ -int cprng_strong_kqfilter(cprng_strong_t *, struct knote *); /* XXX " */ -int cprng_strong_poll(cprng_strong_t *, int); /* XXX " */ - -extern cprng_strong_t *kern_cprng; +extern cprng_strong_t *kern_cprng; /* IPL_VM */ +extern cprng_strong_t *user_cprng; /* IPL_NONE, thread context only */ static __inline uint32_t cprng_strong32(void) diff --git a/sys/sys/rndsink.h b/sys/sys/entropy.h similarity index 64% rename from sys/sys/rndsink.h rename to sys/sys/entropy.h index beda47abcd1d..2cd5cacd6a2a 100644 --- a/sys/sys/rndsink.h +++ b/sys/sys/entropy.h @@ -1,7 +1,7 @@ -/* $NetBSD: rndsink.h,v 1.1 2013/06/23 02:35:24 riastradh Exp $ */ +/* $NetBSD$ */ /*- - * Copyright (c) 2013 The NetBSD Foundation, Inc. + * Copyright (c) 2019 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation @@ -29,25 +29,36 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#ifndef _SYS_RNDSINK_H -#define _SYS_RNDSINK_H +#ifndef _SYS_ENTROPY_H +#define _SYS_ENTROPY_H -#ifndef _KERNEL /* XXX */ -#error is meant for kernel consumers only. +#ifndef _KERNEL +#error This header is known to the state of California to cause cancer in users. 
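With kern_cprng and user_cprng declared as above, most in-kernel consumers stay small. A minimal sketch (the function name is illustrative; kern_cprng is usable up to IPL_VM, user_cprng only from thread context):

    #include <sys/cprng.h>

    /* Generate an unpredictable 64-bit identifier, e.g. for a protocol nonce. */
    static uint64_t
    new_random_id(void)
    {
        uint64_t id;

        cprng_strong(kern_cprng, &id, sizeof id, 0);
        return id;
    }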
#endif -#define RNDSINK_MAX_BYTES 32 +#include +#include -struct rndsink; +#include -typedef void rndsink_callback_t(void *, const void *, size_t); +struct knote; -void rndsinks_init(void); -void rndsinks_distribute(void); -struct rndsink * - rndsink_create(size_t, rndsink_callback_t *, void *); -void rndsink_destroy(struct rndsink *); -bool rndsink_request(struct rndsink *, void *, size_t); -void rndsink_schedule(struct rndsink *); +#define ENTROPY_SIZE ENTPOOL_SIZE /* bytes */ +#define ENTROPY_CAPACITY ENTPOOL_CAPACITY /* bytes */ -#endif /* _SYS_RNDSINK_H */ +#define ENTROPY_WAIT 0x01 +#define ENTROPY_SIG 0x02 + +void entropy_init(void); +void entropy_init_late(void); +void entropy_seed(rndsave_t *); +void entropy_bootrequest(void); +unsigned entropy_epoch(void); +int entropy_extract(void *, size_t, int); +int entropy_poll(int); +int entropy_kqfilter(struct knote *); +int entropy_ioctl(unsigned long, void *); + +extern bool entropy_depletion; + +#endif /* _SYS_ENTROPY_H */ diff --git a/sys/sys/rnd.h b/sys/sys/rnd.h index 98df0b6f9908..578b4f5a9305 100644 --- a/sys/sys/rnd.h +++ b/sys/sys/rnd.h @@ -46,6 +46,7 @@ void rnd_init(void); void rnd_init_softint(void); void rnd_seed(void *, size_t); int rnd_system_ioctl(struct file *, u_long, void *); +void rnd_cpu_init(void); extern int rnd_initial_entropy; diff --git a/sys/sys/rndpool.h b/sys/sys/rndpool.h index 87674451b4d5..1948ddf6b200 100644 --- a/sys/sys/rndpool.h +++ b/sys/sys/rndpool.h @@ -1,12 +1,11 @@ /* $NetBSD: rndpool.h,v 1.3 2015/04/14 13:14:20 riastradh Exp $ */ /*- - * Copyright (c) 1997 The NetBSD Foundation, Inc. + * Copyright (c) 2019 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation - * by Michael Graff . This code uses ideas and - * algorithms from the Linux driver written by Ted Ts'o. + * by Taylor R. Campbell. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -38,35 +37,10 @@ #endif #include -#include /* rndpoolstat_t */ -/* - * Size of entropy pool in 32-bit words. This _MUST_ be a power of 2. Don't - * change this unless you really know what you are doing... - */ -#ifndef RND_POOLWORDS -#define RND_POOLWORDS 128 -#endif -#define RND_POOLBITS (RND_POOLWORDS * 32) - -typedef struct { - uint32_t cursor; /* current add point in the pool */ - uint32_t rotate; /* how many bits to rotate by */ - rndpoolstat_t stats; /* current statistics */ - uint32_t pool[RND_POOLWORDS]; /* random pool data */ -} rndpool_t; - -/* Mode for rnd_extract_data. */ -#define RND_EXTRACT_ANY 0 /* extract as many bits as requested */ -#define RND_EXTRACT_GOOD 1 /* extract as many bits as we have counted - * entropy */ +#include -void rndpool_init(rndpool_t *); -uint32_t rndpool_get_entropy_count(rndpool_t *); -void rndpool_set_entropy_count(rndpool_t *, uint32_t); -void rndpool_get_stats(rndpool_t *, void *, int); -void rndpool_add_data(rndpool_t *, - const void *const , uint32_t, uint32_t); -uint32_t rndpool_extract_data(rndpool_t *, void *, uint32_t, uint32_t); +/* Legacy name for API compatibility. */ +#define RND_POOLBITS (ENTPOOL_CAPACITY*NBBY) #endif /* _SYS_RNDPOOL_H */ diff --git a/sys/sys/rndsource.h b/sys/sys/rndsource.h index 11e53d632500..9f3e3f1b537c 100644 --- a/sys/sys/rndsource.h +++ b/sys/sys/rndsource.h @@ -1,12 +1,11 @@ /* $NetBSD: rndsource.h,v 1.6 2018/04/19 21:19:07 christos Exp $ */ /*- - * Copyright (c) 1997 The NetBSD Foundation, Inc. 
+ * Copyright (c) 2019 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation - * by Michael Graff . This code uses ideas and - * algorithms from the Linux driver written by Ted Ts'o. + * by Taylor R. Campbell. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -34,89 +33,40 @@ #define _SYS_RNDSOURCE_H #ifndef _KERNEL /* XXX */ -#error is meant for kernel consumers only. +#error is meant for kernel consumers only. #endif #include -#include -#include /* RND_TYPE_*, RND_FLAG_* */ -#include #include +#include -typedef struct rnd_delta_estimator { - uint64_t x; - uint64_t dx; - uint64_t d2x; - uint64_t insamples; - uint64_t outbits; -} rnd_delta_t; - -typedef struct krndsource { - LIST_ENTRY(krndsource) list; /* the linked list */ - char name[16]; /* device name */ - rnd_delta_t time_delta; /* time delta estimator */ - rnd_delta_t value_delta; /* value delta estimator */ - uint32_t total; /* entropy from this source */ - uint32_t type; /* type */ - uint32_t flags; /* flags */ - void *state; /* state information */ - size_t test_cnt; /* how much test data accumulated? */ - void (*get)(size_t, void *); /* pool wants N bytes (badly) */ - void *getarg; /* argument to get-function */ - void (*enable)(struct krndsource *, bool); /* turn on/off */ - rngtest_t *test; /* test data for RNG type sources */ - unsigned refcnt; -} krndsource_t; - -static __inline void -rndsource_setcb(struct krndsource *const rs, void (*const cb)(size_t, void *), - void *const arg) -{ - rs->get = cb; - rs->getarg = arg; -} - -static __inline void -rndsource_setenable(struct krndsource *const rs, void *const cb) -{ - rs->enable = cb; -} - -#define RND_ENABLED(rp) \ - (((rp)->flags & RND_FLAG_NO_COLLECT) == 0) +/* + * struct krndsource + * + * State for an entropy source. To be allocated by a driver for a + * hardware entropy source, and treated as opaque. + */ +struct krndsource { + LIST_ENTRY(krndsource) rs_list; /* entry in list of sources */ + char rs_name[16]; /* device name */ + int rs_type; /* type, RND_TYPE_* */ + int rs_flags; /* flags, RND_FLAG_* */ + struct percpu *rs_percpu; /* struct rndsource_cpu */ + unsigned rs_nbits_early; /* bits added while cold */ + void (*rs_request)(size_t, void *); + /* callback to request more */ + void *rs_requestarg; /* cookie for req. 
callback */ +}; -void _rnd_add_uint32(krndsource_t *, uint32_t); -void _rnd_add_uint64(krndsource_t *, uint64_t); -void rnd_add_data(krndsource_t *, const void *const, uint32_t, - uint32_t); -void rnd_add_data_sync(krndsource_t *, const void *, uint32_t, - uint32_t); -void rnd_attach_source(krndsource_t *, const char *, - uint32_t, uint32_t); -void rnd_detach_source(krndsource_t *); +typedef struct krndsource krndsource_t; -static __inline void -rnd_add_uint32(krndsource_t *kr, uint32_t val) -{ - if (__predict_true(kr)) { - if (RND_ENABLED(kr)) { - _rnd_add_uint32(kr, val); - } - } else { - rnd_add_data(NULL, &val, sizeof(val), 0); - } -} +void rndsource_setcb(struct krndsource *, void (*)(size_t, void *), void *); +void rnd_attach_source(struct krndsource *, const char *, int, int); +void rnd_detach_source(struct krndsource *); -static __inline void -rnd_add_uint64(krndsource_t *kr, uint64_t val) -{ - if (__predict_true(kr)) { - if (RND_ENABLED(kr)) { - _rnd_add_uint64(kr, val); - } - } else { - rnd_add_data(NULL, &val, sizeof(val), 0); - } -} +void rnd_add_uint32(struct krndsource *, uint32_t); +void rnd_add_data(struct krndsource *, const void *, uint32_t, uint32_t); +void rnd_add_data_sync(struct krndsource *, const void *, uint32_t, + uint32_t); #endif /* _SYS_RNDSOURCE_H */ From 210a81c9fb896eaffcee84e7968757abcbd8778a Mon Sep 17 00:00:00 2001 From: Taylor R Campbell Date: Fri, 9 Aug 2019 03:40:22 +0000 Subject: [PATCH 3/4] WIP: cpu_rng rndsource --- sys/kern/kern_cpu_rng.c | 82 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) create mode 100644 sys/kern/kern_cpu_rng.c diff --git a/sys/kern/kern_cpu_rng.c b/sys/kern/kern_cpu_rng.c new file mode 100644 index 000000000000..ac45d6b8f4ef --- /dev/null +++ b/sys/kern/kern_cpu_rng.c @@ -0,0 +1,82 @@ +/* $NetBSD$ */ + +/*- + * Copyright (c) 2019 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Taylor R. Campbell. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
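Under the trimmed-down krndsource interface above, a driver-side source looks roughly like this; the softc layout, function names, and the choice of RND_TYPE_ENV and RND_FLAG_COLLECT_* flags are illustrative, not taken from this patch:

    #include <sys/rndsource.h>

    struct foo_softc {                      /* illustrative softc */
        struct krndsource sc_rndsource;
    };

    static void
    foo_rnd_attach(struct foo_softc *sc, const char *name)
    {

        rnd_attach_source(&sc->sc_rndsource, name, RND_TYPE_ENV,
            RND_FLAG_COLLECT_TIME|RND_FLAG_COLLECT_VALUE);
    }

    static void
    foo_rnd_sample(struct foo_softc *sc, uint32_t reading)
    {

        /* Feed a raw reading; the pool decides what to credit. */
        rnd_add_uint32(&sc->sc_rndsource, reading);
    }

    static void
    foo_rnd_detach(struct foo_softc *sc)
    {

        rnd_detach_source(&sc->sc_rndsource);
    }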
+ */ + +#include +__KERNEL_RCSID(0, "$NetBSD$"); + +#include + +#ifdef __HAVE_CPU_RNG + +#include +#include + +#include + +static struct krndsource rnd_cpu_source; + +static void +rnd_cpu_get(size_t nbytes, void *cookie) +{ + /* + * Assume CPU RNG gives at least 1/2 bit of entropy per bit of + * output. (XXX Document this somewhere...) + */ + cpu_rng_t buf[2*howmany(RND_POOLBITS, CHAR_BIT*sizeof(cpu_rng_t))]; + unsigned i, nbits = 0; + + for (i = 0; i < sizeof buf; i++) + nbits += cpu_rng(&buf[i]); + rnd_add_data(&rnd_cpu_source, buf, sizeof buf, nbits); +} + +void +rnd_cpu_init(void) +{ + + if (!cpu_rng_init()) { + printf("rnd: unable to to initialize CPU RNG\n"); + return; + } + + rndsource_setcb(&rnd_cpu_source, rnd_cpu_get, NULL); + rnd_attach_source(&rnd_cpu_source, "cpurng", RND_TYPE_RNG, + RND_FLAG_COLLECT_VALUE|RND_FLAG_HASCB); +} + +#else + +void +rnd_cpu_init(void) +{ +} + +#endif From 030f824cf10cca04fbcbd171ca7cee134fa61834 Mon Sep 17 00:00:00 2001 From: Taylor R Campbell Date: Fri, 9 Aug 2019 03:40:47 +0000 Subject: [PATCH 4/4] WIP: getentropy(2), borken --- lib/libc/sys/getentropy.2 | 109 +++++++++++++++++++++++++++++++++++++ sys/kern/sys_getentropy.c | 110 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 219 insertions(+) create mode 100644 lib/libc/sys/getentropy.2 create mode 100644 sys/kern/sys_getentropy.c diff --git a/lib/libc/sys/getentropy.2 b/lib/libc/sys/getentropy.2 new file mode 100644 index 000000000000..7f78719e1239 --- /dev/null +++ b/lib/libc/sys/getentropy.2 @@ -0,0 +1,109 @@ +.\" $NetBSD$ +.\" +.\" Copyright (c) 2019 The NetBSD Foundation, Inc. +.\" All rights reserved. +.\" +.\" This code is derived from software contributed to The NetBSD Foundation +.\" by Taylor R. Campbell. +.\" +.\" Redistribution and use in source and binary forms, with or without +.\" modification, are permitted provided that the following conditions +.\" are met: +.\" 1. Redistributions of source code must retain the above copyright +.\" notice, this list of conditions and the following disclaimer. +.\" 2. Redistributions in binary form must reproduce the above copyright +.\" notice, this list of conditions and the following disclaimer in the +.\" documentation and/or other materials provided with the distribution. +.\" +.\" THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS +.\" ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +.\" TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +.\" PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS +.\" BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +.\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +.\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +.\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +.\" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +.\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +.\" POSSIBILITY OF SUCH DAMAGE. +.\" +.Dd July 21, 2019 +.Dt GETENTROPY 2 +.Os +.Sh NAME +.Nm getentropy +.Nd wait for unpredictable secrets and randomly fill buffer +.Sh LIBRARY +.Lb libc +.Sh SYNOPSIS +.In unistd.h +.Ft int +.Fn getentropy "void *buf" "size_t len" +.Sh DESCRIPTION +The +.Fn getentropy +system call blocks until the operating system to be initialized with an +unpredictable secret seed, and then fills +.Fa buf +with +.Fa len +uniform random bytes fit for use in cryptography. 
+.Pp +The length +.Fa len +must not exceed 256, or else +.Fn getentropy +will fail with +.Er EIO . +For larger amounts of data, callers should draw a 32-byte key and +expand it with a stream cipher such as ChaCha, as +.Xr arc4random 3 +does. +.Sh RETURN VALUES +.Rv -std getentropy +.Sh ERRORS +.Fn getentropy +will fail if: +.Bl -tag -width [EFAULT] +.It Bq Er EFAULT +Part of +.Fa buf +points outside the process's allocated address space. +.It Bq Er EINTR +A signal was delivered before +.Fn getentropy +could complete. +.It Bq Er EIO +The +.Fa len +argument exceeded 256. +.El +.Sh SEE ALSO +.Xr arc4random 3 , +.Xr rnd 4 , +.Xr boot.cfg 5 , +.Xr cprng 9 , +.Xr rnd 9 +.Sh HISTORY +The +.Fn getentropy +function first appeared in +.Ox 5.6 . +.Sh CAVEATS +Applications in which blocking may affect correctness should take care +to test code paths using +.Fn getentropy +with a sort of +.Dq fault injection +that sleeps. +Otherwise, even during what might appear to be extensive testing, the +blocking behaviour of +.Fn getentropy +is unlikely to be observed, because it usually happens only very early +on at boot. +.Sh BUGS +There is no way to do a multiplexed wait for unpredictability, like +with +.Xr select 2 , +.Xr kqueue 2 , +etc. diff --git a/sys/kern/sys_getentropy.c b/sys/kern/sys_getentropy.c new file mode 100644 index 000000000000..9966d17f4b97 --- /dev/null +++ b/sys/kern/sys_getentropy.c @@ -0,0 +1,110 @@ +/* $NetBSD$ */ + +/*- + * Copyright (c) 2019 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Taylor R. Campbell. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include + +#include +#include +#include + +#define GETENTROPY_MAX 256 + +CTASSERT(GETENTROPY_MAX <= CPRNG_MAX_LEN); + +/* + * System call: getentropy(buf, len) + * + * Wait until the entropy pool has been seeded, and write + * min(256,len) unpredictable secret bytes into buf. Restartable; + * can fail only with EINTR. 
+ */
+int
+sys_getentropy(struct lwp *l, const struct sys_getentropy_args *uap,
+    register_t *retval)
+{
+	/* {
+		syscallarg(void *)	buf;
+		syscallarg(size_t)	len;
+	} */
+	struct cprng_strong *cprng;
+	uint8_t buf[GETENTROPY_MAX];
+	size_t len = SCARG(uap, len);
+	int error;
+
+	/*
+	 * Reject large requests in order to limit time spent in
+	 * kernel.  You can always expand a short key from getentropy
+	 * into a long pad with cryptography in userland.
+	 */
+	if (len > GETENTROPY_MAX)
+		return EIO;
+
+	/* Wait until the entropy pool has been seeded.  */
+	error = rnd_wait_for_initial_entropy();
+	if (error)
+		return error;
+
+	/*
+	 * Create a CPRNG and draw output from it.  Flags:
+	 *
+	 *	. CPRNG_HARD: use AES128_k0(0) || AES128_k1(1) where
+	 *	  k0,k1 are drawn separately from the internal
+	 *	  >>128-bit entropy pool, instead of AES128_k(0) ||
+	 *	  AES128_k(1), where k is a single 128-bit quantity.
+	 *	  (If we just used AES-256, or ChaCha, this wouldn't be
+	 *	  an issue.)
+	 *
+	 *	. CPRNG_INIT_ANY: Don't print a warning if the entropy
+	 *	  pool appears to be unseeded in cprng_strong_create.
+	 *	  We already waited for it to be seeded and the caller
+	 *	  is _not_ concerned with disclosure of kernel memory
+	 *	  -- only with a predictable initial state.
+	 *
+	 *	. CPRNG_REKEY_ANY: Don't print a warning, or sleep, if
+	 *	  the entropy pool appears to be unseeded in
+	 *	  cprng_strong (because of `entropy depletion').  We
+	 *	  already waited for it to be seeded and the caller is
+	 *	  _not_ concerned with disclosure of kernel memory --
+	 *	  only with a predictable initial state.
+	 */
+	cprng = cprng_strong_create("getentropy", IPL_NONE,
+	    CPRNG_HARD|CPRNG_INIT_ANY|CPRNG_REKEY_ANY);
+	cprng_strong(cprng, buf, SCARG(uap, len), 0);
+	cprng_strong_destroy(cprng);
+
+	/* Copy the output to userland now that we've drawn it. */
+	error = copyout(buf, SCARG(uap, buf), len);
+	if (error)
+		return error;
+
+	/* Success! */
+	return 0;
+}
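For completeness, the intended userland call pattern for the new system call, per the man page above (a minimal sketch; the stream-cipher expansion step is only alluded to here):

    #include <err.h>
    #include <string.h>
    #include <unistd.h>

    /*
     * Draw a 32-byte seed.  getentropy blocks until the system entropy
     * pool has been seeded, and fails with EIO for requests over 256
     * bytes; bulk output should be expanded from a short seed instead.
     */
    int
    main(void)
    {
        unsigned char seed[32];

        if (getentropy(seed, sizeof seed) == -1)
            err(1, "getentropy");

        /* ... key a ChaCha/arc4random-style generator with seed ... */

        explicit_memset(seed, 0, sizeof seed);
        return 0;
    }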