/*
 * Untested stress test for an imaginary concurrent finite map API,
 * 2014-08-28.  Usage:
 *
 *	unsigned nthreads = 24;
 *	char seed[] = "my seed";
 *
 *	stress(nthreads, seed, sizeof seed);
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/endian.h>

#include <assert.h>
#include <err.h>
#include <errno.h>
#include <limits.h>
#include <pthread.h>
#include <sha2.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define	container_of(PTR, TYPE, FIELD)					\
	((void)sizeof((PTR) -						\
		&((TYPE *)(((char *)(PTR)) -				\
		    offsetof(TYPE, FIELD)))->FIELD),			\
	    ((TYPE *)(((char *)(PTR)) - offsetof(TYPE, FIELD))))

/* Map API */

struct map {
	uint64_t	map_crap;
};

struct ent {
	uint64_t	ent_crud;
};

void		map_init(struct map *);
void		map_destroy(struct map *);

void		map_read_enter(struct map *);
void		map_read_exit(struct map *);
struct ent *	map_lookup(struct map *, const void *, size_t);
#ifdef MAP_LOOKUP_GEQ_GT
struct ent *	map_lookup_geq(struct map *, const void *, size_t);
struct ent *	map_lookup_gt(struct map *, const void *, size_t);
#endif

bool		map_insert(struct map *, const void *, size_t, struct ent *);
void		map_delete(struct map *, const void *, size_t, struct ent *);

/*
 * Other possibilities.  These need to be done in a read transaction so
 * you can use the existing entry, whether it stays in the map
 * (map_insert_lookup) or gets replaced (map_replace_lookup).
 */
struct ent *	map_insert_lookup(struct map *, const void *, size_t,
		    struct ent *);
struct ent *	map_replace_lookup(struct map *, const void *, size_t,
		    struct ent *);
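/*
 * Illustrative sketch, not part of the original test: how a caller
 * might use map_replace_lookup under a read transaction, per the
 * comment above.  The return convention assumed here -- the displaced
 * entry, or NULL if the key was absent -- is a guess about the
 * imaginary API; the helper is hypothetical and compiled out by
 * default.
 */
#ifdef MAP_EXAMPLE_USAGE
static void
example_replace(struct map *map, const uint64_t *key, struct ent *newent)
{
	struct ent *oldent;

	map_read_enter(map);
	oldent = map_replace_lookup(map, key, sizeof *key, newent);
	if (oldent != NULL) {
		/*
		 * The displaced entry may be examined here, but only
		 * until map_read_exit -- after that its owner is free
		 * to destroy it.
		 */
	}
	map_read_exit(map);
}
#endif	/* MAP_EXAMPLE_USAGE */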
/* ChaCha8 core, for PRNG */

#define	crypto_core_ROUNDS	8

typedef uint32_t uint32;

static uint32
rotate(uint32 u, int c)
{

	return (u << c) | (u >> (32 - c));
}

#define	QUARTERROUND(a, b, c, d) do {					\
	(a) += (b); (d) ^= (a); (d) = rotate((d), 16);			\
	(c) += (d); (b) ^= (c); (b) = rotate((b), 12);			\
	(a) += (b); (d) ^= (a); (d) = rotate((d), 8);			\
	(c) += (d); (b) ^= (c); (b) = rotate((b), 7);			\
} while (0)

void
crypto_core(uint32 *out, const uint32 *in, const uint32 *k, const uint32 *c)
{
	uint32 x0, x1, x2, x3, x4, x5, x6, x7;
	uint32 x8, x9, x10, x11, x12, x13, x14, x15;
	uint32 j0, j1, j2, j3, j4, j5, j6, j7;
	uint32 j8, j9, j10, j11, j12, j13, j14, j15;
	int i;

	j0 = x0 = c[0];
	j1 = x1 = c[1];
	j2 = x2 = c[2];
	j3 = x3 = c[3];
	j4 = x4 = k[0];
	j5 = x5 = k[1];
	j6 = x6 = k[2];
	j7 = x7 = k[3];
	j8 = x8 = k[4];
	j9 = x9 = k[5];
	j10 = x10 = k[6];
	j11 = x11 = k[7];
	j12 = x12 = in[0];
	j13 = x13 = in[1];
	j14 = x14 = in[2];
	j15 = x15 = in[3];

	for (i = crypto_core_ROUNDS; i > 0; i -= 2) {
		QUARTERROUND( x0, x4, x8, x12);
		QUARTERROUND( x1, x5, x9, x13);
		QUARTERROUND( x2, x6, x10, x14);
		QUARTERROUND( x3, x7, x11, x15);
		QUARTERROUND( x0, x5, x10, x15);
		QUARTERROUND( x1, x6, x11, x12);
		QUARTERROUND( x2, x7, x8, x13);
		QUARTERROUND( x3, x4, x9, x14);
	}

	out[0] = x0 + j0;
	out[1] = x1 + j1;
	out[2] = x2 + j2;
	out[3] = x3 + j3;
	out[4] = x4 + j4;
	out[5] = x5 + j5;
	out[6] = x6 + j6;
	out[7] = x7 + j7;
	out[8] = x8 + j8;
	out[9] = x9 + j9;
	out[10] = x10 + j10;
	out[11] = x11 + j11;
	out[12] = x12 + j12;
	out[13] = x13 + j13;
	out[14] = x14 + j14;
	out[15] = x15 + j15;
}

/* ChaCha8-based PRNG */

struct random64 {
	uint32_t	r_key[8];
	uint32_t	r_nonce[4];
	union {
		uint32_t	u32[16];
		uint64_t	u64[8];
	}		r_buf;
	unsigned	r_nbuf;
};

static void
SHA256(uint8_t hash[SHA256_DIGEST_LENGTH], const void *buf, size_t size)
{
	SHA256_CTX ctx;

	SHA256_Init(&ctx);
	SHA256_Update(&ctx, buf, size);
	SHA256_Final(hash, &ctx);
}

static void
random64_init(struct random64 *r, const void *seed, size_t seedlen)
{
	uint8_t key[32];

	__CTASSERT(SHA256_DIGEST_LENGTH == sizeof key);
	__CTASSERT(sizeof key == sizeof r->r_key);

	/* Derive the ChaCha key from the seed. */
	SHA256(key, seed, seedlen);
	(void)memcpy(r->r_key, key, sizeof key);

	/* Start with a zero nonce/counter and an empty buffer, so the
	 * state does not depend on the caller having zeroed it. */
	(void)memset(r->r_nonce, 0, sizeof r->r_nonce);
	r->r_nbuf = 0;
}

static uint64_t
random64(struct random64 *r)
{

	if (__predict_false(r->r_nbuf == 0)) {
		const uint32_t sigma[4] = {	/* `expand 32-byte k' */
			0x61707865U, 0x3320646eU, 0x79622d32U, 0x6b206574U,
		};

		/* Refill the buffer with a fresh ChaCha8 block and
		 * advance the block counter. */
		crypto_core(r->r_buf.u32, r->r_nonce, r->r_key, sigma);
		r->r_nbuf = 8;
		if (r->r_nonce[0]++ == 0)
			r->r_nonce[1]++;
	}

	return r->r_buf.u64[--r->r_nbuf];
}

#define	check(E) do {							\
	if (!__predict_true(E))						\
		errx(1, "check failed in %s: %s:%d: %s\n", __func__,	\
		    __FILE__, __LINE__, #E);				\
} while (0)

#define	MAXITER		54321
#define	MAXKEY		100000000000ULL
#define	KEYSTEP		12345

struct entry {
	struct ent	e_ent;
	uint64_t	e_key;
	void		*e_datum0;
	void		*e_datum1;
	volatile unsigned e_refcnt;
};

static void	clobber(struct entry *, uint64_t);
static void	clobber_impl(struct entry *, uint64_t);

static void	(*volatile clobber_fn)(struct entry *, uint64_t) =
		    &clobber_impl;

static volatile unsigned nodecnt = 0;

/* Try to take a reference; fail with EBUSY if the count would overflow. */
static int
hold(struct entry *entry)
{
	unsigned refcnt;

	do {
		refcnt = entry->e_refcnt;
		if (refcnt == UINT_MAX)
			return EBUSY;
	} while (atomic_cas_uint(&entry->e_refcnt, refcnt, (refcnt + 1))
	    != refcnt);

	return 0;
}

/* Drop a reference; return true if it was the last one. */
static bool
rele(struct entry *entry)
{

	return (atomic_dec_uint_nv(&entry->e_refcnt) == 0);
}

static void
rele_notlast(struct entry *entry)
{

	check(!rele(entry));
}

static void
rele_free(struct entry *entry)
{

	if (rele(entry)) {
		check(atomic_dec_uint_nv(&nodecnt) < UINT_MAX);
		clobber(entry, entry->e_key);
		free(entry);
	}
}

static void
check_datum(struct entry *entry)
{

	check(entry->e_datum0 != entry->e_datum1);
	check((entry->e_datum0 == &entry->e_datum0) ||
	    (entry->e_datum0 == &entry->e_datum1));
	check((entry->e_datum1 == &entry->e_datum0) ||
	    (entry->e_datum1 == &entry->e_datum1));
}

static void
check_entry(struct entry *entry, uint64_t key)
{

	assert((key % KEYSTEP) == 0);
	check(entry->e_key == key);
	check_datum(entry);
	membar_consumer();
	check(entry->e_key == key);
	check_datum(entry);
}

#ifdef MAP_LOOKUP_GEQ_GT
static void
check_geq(struct entry *entry, uint64_t key)
{

	check((entry->e_key % KEYSTEP) == 0);
	check(entry->e_key >= key);
	check_datum(entry);
	membar_consumer();
	check(entry->e_key >= key);
	check_datum(entry);
}
#endif

#ifdef MAP_LOOKUP_GEQ_GT
static void
check_gt(struct entry *entry, uint64_t key)
{

	check((entry->e_key % KEYSTEP) == 0);
	check(entry->e_key > key);
	check_datum(entry);
	membar_consumer();
	check(entry->e_key > key);
	check_datum(entry);
}
#endif

static void
check_lookup(struct map *map, uint64_t key)
{
	struct ent *ent;
	struct entry *entry;
#ifdef MAP_LOOKUP_GEQ_GT
	struct entry *next;
#endif

	map_read_enter(map);
	ent = map_lookup(map, &key, sizeof key);
	if (ent == NULL)
		goto out;
	entry = container_of(ent, struct entry, e_ent);
	check_entry(entry, key);
#ifdef MAP_LOOKUP_GEQ_GT
	ent = map_lookup_gt(map, &entry->e_key, sizeof entry->e_key);
	if (ent == NULL)
		goto out;
	next = container_of(ent, struct entry, e_ent);
	check_gt(next, key);
#endif
out:	map_read_exit(map);
}

#ifdef MAP_LOOKUP_GEQ_GT
static void
check_lookup_geq(struct map *map, uint64_t key)
{
	struct ent *ent;
	struct entry *entry;

	map_read_enter(map);
	ent = map_lookup_geq(map, &key, sizeof key);
	if (ent == NULL)
		goto out;
	entry = container_of(ent, struct entry, e_ent);
	check_geq(entry, key);
out:	map_read_exit(map);
}
#endif

static void
check_lookup_miss(struct map *map, uint64_t key)
{
	struct ent *ent;

	map_read_enter(map);
	ent = map_lookup(map, &key, sizeof key);
	check(ent == NULL);
	map_read_exit(map);
}
static void
check_insert(struct map *map, uint64_t key)
{
	struct entry *entry;

	assert((key % KEYSTEP) == 0);

	entry = malloc(sizeof(*entry));
	if (entry == NULL)
		return;
	entry->e_key = key;
	entry->e_datum0 = &entry->e_datum0;
	entry->e_datum1 = &entry->e_datum1;
	entry->e_refcnt = 1;

	if (!map_insert(map, &key, sizeof key, &entry->e_ent)) {
		clobber(entry, key);
		free(entry);
		return;
	}

	check(atomic_inc_uint_nv(&nodecnt) <= howmany(MAXKEY, KEYSTEP));
}

static void
check_reinsert(struct map *map, uint64_t key)
{
	struct ent *ent;
	struct entry *entry;
	void *t;

	map_read_enter(map);
	ent = map_lookup(map, &key, sizeof key);
	if (ent == NULL) {
		map_read_exit(map);
		return;
	}
	entry = container_of(ent, struct entry, e_ent);
	if (hold(entry) != 0) {
		map_read_exit(map);
		return;
	}
	map_read_exit(map);

	check_entry(entry, key);
	map_delete(map, &key, sizeof key, &entry->e_ent);
	rele_notlast(entry);		/* release our reference */
	if (rele(entry)) {		/* release map's reference */
		/* We have exclusive access now.  Twiddle an invariant. */
		t = entry->e_datum0;
		entry->e_datum0 = entry->e_datum1;
		entry->e_datum1 = t;
		check(hold(entry) == 0);
		if (!map_insert(map, &key, sizeof key, &entry->e_ent))
			rele_free(entry);
	}
}

static void
check_delete(struct map *map, uint64_t key)
{
	struct ent *ent;
	struct entry *entry;

	map_read_enter(map);
	ent = map_lookup(map, &key, sizeof key);
	if (ent == NULL) {
		map_read_exit(map);
		return;
	}
	entry = container_of(ent, struct entry, e_ent);
	check_entry(entry, key);
	if (hold(entry) != 0) {
		map_read_exit(map);
		return;
	}
	map_read_exit(map);

	check_entry(entry, key);
	map_delete(map, &key, sizeof key, &entry->e_ent);
	rele_notlast(entry);		/* release map's reference */
	rele_free(entry);		/* release our reference */
}

#ifdef MAP_LOOKUP_GEQ_GT
static void
check_delete_geq(struct map *map, uint64_t key)
{
	struct ent *ent;
	struct entry *entry;

	map_read_enter(map);
	ent = map_lookup_geq(map, &key, sizeof key);
	if (ent == NULL) {
		map_read_exit(map);
		return;
	}
	entry = container_of(ent, struct entry, e_ent);
	check_geq(entry, key);
	if (hold(entry) != 0) {
		map_read_exit(map);
		return;
	}
	map_read_exit(map);

	check_geq(entry, key);
	map_delete(map, &entry->e_key, sizeof entry->e_key, &entry->e_ent);
	rele_notlast(entry);		/* release map's reference */
	rele_free(entry);		/* release our reference */
}
#endif

static void
clobber(struct entry *entry, uint64_t key)
{

	(*clobber_fn)(entry, key);
}

static void
clobber_impl(struct entry *entry, uint64_t key)
{

	check_entry(entry, key);
	entry->e_key--;
	entry->e_datum0 = &entry->e_key;
	entry->e_datum1 = &entry->e_key;
	membar_producer();
}

static void
test_seqlookup(struct map *map, struct random64 *r __unused)
{
	uint64_t key;

	for (key = 0; key <= MAXKEY; key += KEYSTEP)
		check_lookup(map, key);
}

#ifdef MAP_LOOKUP_GEQ_GT
static void
test_seqlookup_geq(struct map *map, struct random64 *r __unused)
{
	unsigned i;
	uint64_t key;

	for (i = 0; i < KEYSTEP; i++) {
		/*
		 * Keys congruent to -i modulo KEYSTEP, i.e. at or just
		 * below each multiple of KEYSTEP.  (Writing `key = -i'
		 * would negate in unsigned int, not modulo KEYSTEP,
		 * and start the loop around 2^32 instead of near 0.)
		 */
		for (key = (KEYSTEP - i) % KEYSTEP; key <= MAXKEY;
		    key += KEYSTEP)
			check_lookup_geq(map, key);
	}
}
#endif

static void
test_seqlookup_miss(struct map *map, struct random64 *r __unused)
{
	unsigned i;
	uint64_t key;

	for (i = 1; i < KEYSTEP; i++) {
		/* Keys congruent to -i modulo KEYSTEP are never
		 * multiples of KEYSTEP, so they must always miss. */
		for (key = KEYSTEP - i; key <= MAXKEY; key += KEYSTEP)
			check_lookup_miss(map, key);
	}
}

static void
test_seqinsert(struct map *map, struct random64 *r __unused)
{
	uint64_t key;

	for (key = 0; key <= MAXKEY; key += KEYSTEP)
		check_insert(map, key);
}

static void
test_seqreinsert(struct map *map, struct random64 *r __unused)
{
	uint64_t key;

	for (key = 0; key <= MAXKEY; key += KEYSTEP)
		check_reinsert(map, key);
}
static void
test_seqdelete(struct map *map, struct random64 *r __unused)
{
	uint64_t key;

	for (key = 0; key <= MAXKEY; key += KEYSTEP)
		check_delete(map, key);
}

static void
test_rndlookup(struct map *map, struct random64 *r)
{
	unsigned i;
	uint64_t key;

	for (i = 0; i <= howmany(MAXKEY, KEYSTEP); i++) {
		key = random64(r);
		if (key % KEYSTEP)
			check_lookup_miss(map, key);
		else
			check_lookup(map, key);
	}
}

#ifdef MAP_LOOKUP_GEQ_GT
static void
test_rndlookup_geq(struct map *map, struct random64 *r)
{
	unsigned i;

	for (i = 0; i <= howmany(MAXKEY, KEYSTEP); i++)
		check_lookup_geq(map, random64(r));
}
#endif

static void
test_rndinsert(struct map *map, struct random64 *r)
{
	unsigned i;

	for (i = 0; i <= howmany(MAXKEY, KEYSTEP); i++)
		check_insert(map, rounddown(random64(r), KEYSTEP));
}

static void
test_rndreinsert(struct map *map, struct random64 *r)
{
	unsigned i;

	for (i = 0; i <= howmany(MAXKEY, KEYSTEP); i++)
		check_reinsert(map, rounddown(random64(r), KEYSTEP));
}

static void
test_rnddelete(struct map *map, struct random64 *r)
{
	unsigned i;

	for (i = 0; i <= howmany(MAXKEY, KEYSTEP); i++)
		check_delete(map, rounddown(random64(r), KEYSTEP));
}

#ifdef MAP_LOOKUP_GEQ_GT
static void
test_rnddelete_geq(struct map *map, struct random64 *r)
{
	unsigned i;

	for (i = 0; i <= howmany(MAXKEY, KEYSTEP); i++)
		check_delete_geq(map, random64(r));
}
#endif

#ifdef MAP_LOOKUP_GEQ_GT
static void
test_iterate(struct map *map, struct random64 *r __unused)
{
	struct ent *ent;
	struct entry *entry;
	uint64_t key = 0;
	unsigned i;

	map_read_enter(map);
	ent = map_lookup_geq(map, &key, sizeof key);
	if (ent == NULL) {
		/* XXX How about guaranteeing always at least one entry? */
		map_read_exit(map);
		return;
	}
	entry = container_of(ent, struct entry, e_ent);
	check_geq(entry, key);
	key = entry->e_key;
	map_read_exit(map);

	for (i = 0; i <= howmany(MAXKEY, KEYSTEP); i++) {
		map_read_enter(map);
		ent = map_lookup_gt(map, &key, sizeof key);
		if (ent == NULL) {
			map_read_exit(map);
			return;
		}
		entry = container_of(ent, struct entry, e_ent);
		check_gt(entry, key);
		key = entry->e_key;
		map_read_exit(map);
	}

	check(!"too many entries");
}
#endif

static void (*const tests[])(struct map *, struct random64 *) = {
	&test_seqlookup,
#ifdef MAP_LOOKUP_GEQ_GT
	&test_seqlookup_geq,
#endif
	&test_seqlookup_miss,
	&test_seqinsert,
	&test_seqreinsert,
	&test_seqdelete,
	&test_rndlookup,
#ifdef MAP_LOOKUP_GEQ_GT
	&test_rndlookup_geq,
#endif
	&test_rndinsert,
	&test_rndreinsert,
	&test_rnddelete,
#ifdef MAP_LOOKUP_GEQ_GT
	&test_rnddelete_geq,
	&test_iterate,
#endif
};

struct stress {
	void		(*s_test)(struct map *, struct random64 *);
	struct map	*s_map;
	struct random64	s_random64;
	pthread_t	s_thread;
};

static void *
stress_thread(void *arg)
{
	struct stress *const s = arg;
	unsigned i;

	for (i = 0; i < MAXITER; i++)
		(*s->s_test)(s->s_map, &s->s_random64);

	return NULL;
}

void
stress(unsigned nthreads, const void *global_seed, size_t global_seedlen)
{
	struct map map;
	struct stress *s;
	uint8_t global_key[SHA256_DIGEST_LENGTH];
	uint8_t seed[sizeof global_key + sizeof(uint64_t)];
	unsigned i;
	int error;

	SHA256(global_key, global_seed, global_seedlen);
	(void)memcpy(seed, global_key, sizeof global_key);

	map_init(&map);

	s = calloc(nthreads, sizeof(*s));
	if (s == NULL)
		err(1, "calloc");
	for (i = 0; i < nthreads; i++) {
		s[i].s_test = tests[i % __arraycount(tests)];
		s[i].s_map = &map;
		le64enc(seed + sizeof global_key, i);
		random64_init(&s[i].s_random64, seed, sizeof seed);
		error = pthread_create(&s[i].s_thread, NULL, &stress_thread,
		    &s[i]);
		if (error) {
			errno = error;
			err(1, "pthread_create");
		}
	}
	for (i = 0; i < nthreads; i++) {
		error = pthread_join(s[i].s_thread, NULL);
		if (error) {
			errno = error;
			err(1, "pthread_join");
		}
	}
	free(s);

	map_destroy(&map);
	check(nodecnt == 0);
}
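/*
 * Example driver (a sketch, not part of the original test): mirrors the
 * usage shown in the comment at the top of this file.  The thread count
 * and seed are arbitrary, and the MAP_STRESS_MAIN guard is hypothetical;
 * define it to build a standalone main.
 */
#ifdef MAP_STRESS_MAIN
int
main(void)
{
	unsigned nthreads = 24;
	char seed[] = "my seed";

	stress(nthreads, seed, sizeof seed);

	return 0;
}
#endif	/* MAP_STRESS_MAIN */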