/*-
 * Copyright (c) 2015--2017 Taylor R. Campbell
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * XXX XXX XXX
 *
 * DRAFT BROKEN critbit tree with parallelized insert BROKEN DRAFT
 *
 * XXX XXX XXX
 */

#ifdef _KERNEL

#include <sys/systm.h>		/* XXX header names garbled; guessed */
#include <sys/atomic.h>

#define	assert	KASSERT

#else  /* !_KERNEL */

#include <assert.h>		/* XXX header names garbled; guessed */
#include <limits.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#ifndef __diagused
#ifdef NDEBUG
#define	__diagused	__unused
#else
#define	__diagused
#endif
#endif

#if CRITBIT_DEBUG
#define	__dbgused
#else
#define	__dbgused	__unused
#endif

#endif	/* _KERNEL */

#include "container_of.h"
#include "critbit.h"

static inline uintptr_t
atomic_cas_uintptr(volatile uintptr_t *p, uintptr_t o, uintptr_t n)
{

	return (uintptr_t)atomic_cas_ptr(p, (void *)o, (void *)n);
}

#define	CRITBIT_INDEX	((uint32_t)__BITS(0,15))
#define	CRITBIT_BITS	((uint32_t)__BITS(16,23))
#define	CRITBIT_LOCK	((uint32_t)__BIT(24))
#define	CRITBIT_MAGIC	((uint32_t)__BITS(25,31))

static inline uint32_t
makemeta(uint16_t index, uint8_t bits)
{

	return __SHIFTIN(index, CRITBIT_INDEX) | __SHIFTIN(bits, CRITBIT_BITS);
}

static inline uint16_t
getindex(uint32_t meta)
{

	return __SHIFTOUT(meta, CRITBIT_INDEX);
}

static inline uint8_t
getbits(uint32_t meta)
{

	return __SHIFTOUT(meta, CRITBIT_BITS);
}

static bool
branch_trylock(struct critbit_branch *B)
{
	uint32_t ometa, nmeta;

	do {
		ometa = B->cbb_meta;
		if (ometa & CRITBIT_LOCK)
			return false;
		nmeta = ometa | CRITBIT_LOCK;
	} while (atomic_cas_32(&B->cbb_meta, ometa, nmeta) != ometa);

	return true;
}

static void
branch_lock(struct critbit_branch *B)
{

	while (!branch_trylock(B))
		spinwait();
}

static void
branch_unlock(struct critbit_branch *B)
{

	atomic_and_32(&B->cbb_meta, ~CRITBIT_LOCK);
}
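/*
 * CRITBIT_LOCK is a one-bit spin lock on a branch's meta word.
 * Inserters take it with branch_trylock and, on failure, go back and
 * re-find the insertion point, since a concurrent deletion may be
 * unwiring the parent.  The delete commit path takes it with
 * branch_lock and never unlocks, because the locked branch is about
 * to be replaced by a recycled copy and retired with its node.
 */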
#if CRITBIT_DEBUG

#define	DBG(x)	x

#define	CRITBIT_MAGIC_INIT	0x40	/* initialized */
#define	CRITBIT_MAGIC_INSERTED	0x43	/* inserted and not deleted */
#define	CRITBIT_MAGIC_PENDING	0x4A	/* deleted but not committed */
#define	CRITBIT_MAGIC_DELETED	0x49	/* deleted, _maybe_ committed */
#define	CRITBIT_MAGIC_DEAD	0x4F	/* destroyed */

static inline void
setmagic(struct critbit_node *N, uint8_t magic)
{
	uint32_t ometa, nmeta;

	do {
		ometa = N->cbn_branch.cbb_meta;
		nmeta = (ometa & ~CRITBIT_MAGIC) |
		    __SHIFTIN(magic, CRITBIT_MAGIC);
	} while (atomic_cas_32(&N->cbn_branch.cbb_meta, ometa, nmeta) !=
	    ometa);
}

#define	N_INIT(N)	(setmagic((N), CRITBIT_MAGIC_INIT))
#define	N_INSERTED(N)	(setmagic((N), CRITBIT_MAGIC_INSERTED))
#define	N_PENDING(N)	(setmagic((N), CRITBIT_MAGIC_PENDING))
#define	N_DELETED(N)	(setmagic((N), CRITBIT_MAGIC_DELETED))
#define	N_DEAD(N)	(setmagic((N), CRITBIT_MAGIC_DEAD))

#define	N_ASSERT_INSERTED(N)	B_ASSERT_INSERTED(&(N)->cbn_branch)
#define	N_ASSERT_MAYBE_INSERTED(N) B_ASSERT_MAYBE_INSERTED(&(N)->cbn_branch)
#define	N_ASSERT_NOT_INSERTED(N) B_ASSERT_NOT_INSERTED(&(N)->cbn_branch)
#define	N_ASSERT_PENDING(N)	B_ASSERT_PENDING(&(N)->cbn_branch)
#define	N_ASSERT_DELETED(N)	B_ASSERT_DELETED(&(N)->cbn_branch)
#define	N_ASSERT_NOT_COMMITTED(N) B_ASSERT_NOT_COMMITTED(&(N)->cbn_branch)

#define	B_MAGIC(B)	__SHIFTOUT((B)->cbb_meta, CRITBIT_MAGIC)
#define	B_ASSERT_INSERTED(B)						      \
	assert(B_MAGIC(B) == CRITBIT_MAGIC_INSERTED)
#define	B_ASSERT_MAYBE_INSERTED(B)					      \
	assert(B_MAGIC(B) == CRITBIT_MAGIC_INSERTED ||			      \
	    B_MAGIC(B) == CRITBIT_MAGIC_PENDING ||			      \
	    B_MAGIC(B) == CRITBIT_MAGIC_DELETED)
#define	B_ASSERT_NOT_INSERTED(B)					      \
	assert(B_MAGIC(B) == CRITBIT_MAGIC_INIT ||			      \
	    B_MAGIC(B) == CRITBIT_MAGIC_DELETED)
#define	B_ASSERT_PENDING(B)						      \
	assert(B_MAGIC(B) == CRITBIT_MAGIC_PENDING)
#define	B_ASSERT_DELETED(B)						      \
	assert(B_MAGIC(B) == CRITBIT_MAGIC_DELETED)
#define	B_ASSERT_NOT_COMMITTED(B)					      \
	assert(B_MAGIC(B) == CRITBIT_MAGIC_INSERTED ||			      \
	    B_MAGIC(B) == CRITBIT_MAGIC_PENDING)

#else  /* !CRITBIT_DEBUG */

#define	DBG(x)	((void)0)

#endif

#define	MIN(x, y)	((x) < (y) ? (x) : (y))

/* Tagged child pointers */

static bool
isbranch(uintptr_t child)
{

	return (child & 1) == 1;
}

static bool __diagused
isleaf(uintptr_t child)
{

	return (child & 1) == 0;
}

static uintptr_t
branch2child(struct critbit_branch *B)
{

	assert((uintptr_t)B != 0);
	assert(((uintptr_t)B & 1) == 0);

	return (uintptr_t)B | 1;
}

static uintptr_t
leaf2child(struct critbit_node *N)
{

	assert((uintptr_t)N != 0);
	assert(((uintptr_t)N & 1) == 0);

	return (uintptr_t)N;
}

static struct critbit_branch *
child2branch(uintptr_t child)
{
	struct critbit_branch *B;

	assert((child & 1) == 1);
	B = (struct critbit_branch *)(child - 1);
	assert(B != NULL);

	return B;
}

static struct critbit_node *
child2leaf(uintptr_t child)
{
	struct critbit_node *N;

	assert((child & 1) == 0);
	N = (struct critbit_node *)child;
	assert(N != NULL);

	return N;
}

/* Direction and iteration paths */

static inline unsigned
revdir(unsigned dir)
{

	/*
	 * Could use 1 - dir instead, but that usually takes one more
	 * instruction on common CPU architectures, since subtraction
	 * instructions usually accept an immediate subtrahend but not
	 * an immediate minuend, while xor is commutative so it doesn't
	 * matter.
	 */
	return 1 ^ dir;
}
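/*
 * An iterator records its path from the root as a bit string in the
 * caller-supplied stack S: bit d of S is the direction taken at depth
 * d, 0 for the left child and 1 for the right.  getdir and setdir
 * read and write a single direction bit.
 */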
static inline unsigned
getdir(unsigned char *S, unsigned d)
{
	const unsigned i = d / CHAR_BIT;
	const unsigned j = d % CHAR_BIT;

	assert(d <= CHAR_BIT*CRITBIT_MAX_KEYLEN);

	return 1 & (S[i] >> j);
}

static inline void
setdir(unsigned char *S, unsigned d, unsigned dir)
{
	const unsigned i = d / CHAR_BIT;
	const unsigned j = d % CHAR_BIT;

	assert(d <= CHAR_BIT*CRITBIT_MAX_KEYLEN);
	assert(dir == (dir & 1));

	S[i] &= ~(1 << j);
	S[i] |= dir << j;
}

/* Critbit tree and node creation and destruction */

void
critbit_init(struct critbit *C, const struct critbit_ops *ops)
{

	C->cb_root = 0;
	C->cb_ops = ops;
}

void
critbit_destroy(struct critbit *C)
{

	assert(C->cb_root == 0);
	assert(C->cb_ops != NULL);

	C->cb_root = -1;
	C->cb_ops = NULL;
}

void
critbit_node_init(struct critbit_node *N __dbgused __diagused,
    const void *key __diagused, size_t keylen __diagused)
{

	assert(((uintptr_t)N & 1) == 0);
	assert(((uintptr_t)&N->cbn_branch & 1) == 0);
	assert(key != NULL);
	assert(0 < keylen);
	assert(keylen <= CRITBIT_MAX_KEYLEN);

	DBG(N_INIT(N));
}

void
critbit_node_destroy(struct critbit_node *N __dbgused)
{

	DBG(N_ASSERT_NOT_INSERTED(N));

	/* Poison the node so stray users crash noisily. */
	N->cbn_branch.cbb_child[0] = 12345;
	N->cbn_branch.cbb_child[1] = 54321;
	N->cbn_branch.cbb_meta = makemeta(65535, 0x53);

	DBG(N_DEAD(N));
}

static const uint8_t *
critbit_key(const struct critbit *C, const struct critbit_node *N,
    size_t *keylenp)
{
	const uint8_t *k;

	k = (*C->cb_ops->cbo_key)(C, N, keylenp);
	assert(k != NULL);
	assert(*keylenp <= CRITBIT_MAX_KEYLEN);

	return k;
}
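/*
 * Example (sketch, not part of the original): a critbit_ops for nodes
 * embedded in a string-keyed record.  The field names cbo_key,
 * cbo_membar_datadep_consumer, cbo_membar_producer, and cbo_sync are
 * the ones this file calls; the record type and the choice of membar
 * and sync primitives are hypothetical.
 *
 *	struct record {
 *		struct critbit_node	r_node;
 *		char			r_key[64];
 *	};
 *
 *	static const uint8_t *
 *	record_key(const struct critbit *C, const struct critbit_node *N,
 *	    size_t *keylenp)
 *	{
 *		const struct record *R =
 *		    container_of(N, struct record, r_node);
 *
 *		*keylenp = strlen(R->r_key);
 *		return (const uint8_t *)R->r_key;
 *	}
 *
 *	static const struct critbit_ops record_ops = {
 *		.cbo_key = record_key,
 *		.cbo_membar_datadep_consumer = ...,  (e.g. membar_datadep_consumer)
 *		.cbo_membar_producer = ...,          (e.g. membar_producer)
 *		.cbo_sync = ...,                     (e.g. wait for readers to
 *		                                      drain, as in passive
 *		                                      serialization or RCU)
 *	};
 */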
/* Min/max and lookup */

struct critbit_node *
critbit_min(const struct critbit *C)
{
	struct critbit_branch *B;
	struct critbit_node *N;
	uintptr_t child;

	if ((child = C->cb_root) == 0)
		return NULL;

	for (; isbranch(child); child = B->cbb_child[0]) {
		B = child2branch(child);
		(*C->cb_ops->cbo_membar_datadep_consumer)();
		DBG(B_ASSERT_MAYBE_INSERTED(B));
	}

	assert(isleaf(child));
	N = child2leaf(child);
	(*C->cb_ops->cbo_membar_datadep_consumer)();
	DBG(N_ASSERT_MAYBE_INSERTED(N));

	return N;
}

struct critbit_node *
critbit_max(const struct critbit *C)
{
	struct critbit_branch *B;
	struct critbit_node *N;
	uintptr_t child;

	if ((child = C->cb_root) == 0)
		return NULL;

	for (; isbranch(child); child = B->cbb_child[1]) {
		B = child2branch(child);
		(*C->cb_ops->cbo_membar_datadep_consumer)();
		DBG(B_ASSERT_MAYBE_INSERTED(B));
	}

	assert(isleaf(child));
	N = child2leaf(child);
	(*C->cb_ops->cbo_membar_datadep_consumer)();
	DBG(N_ASSERT_MAYBE_INSERTED(N));

	return N;
}

struct critbit_node *
critbit_lookup(const struct critbit *C, const void *key, size_t keylen)
{
	struct critbit_node *N;
	size_t keylen0;

	assert(keylen <= CRITBIT_MAX_KEYLEN);

	/* Find the first node with the requested key as a prefix. */
	N = critbit_lookup_prefix(C, key, keylen);
	if (N == NULL)
		return NULL;

	/* Reject if it is not an exact match. */
	(void)critbit_key(C, N, &keylen0);
	if (keylen0 != keylen)
		return NULL;

	/* Success! */
	return N;
}

/* Lookup prefix */

struct critbit_node *
critbit_lookup_prefix(const struct critbit *C, const void *key, size_t keylen)
{
	const uint8_t *const k = key;
	const struct critbit_branch *B;
	uintptr_t child;
	unsigned dir;
	struct critbit_node *N;
	const uint8_t *k0;
	size_t keylen0;
	size_t i;

	assert(keylen <= CRITBIT_MAX_KEYLEN);

	/* If the tree is empty, it's not there. */
	if ((child = C->cb_root) == 0)
		return NULL;

	/* Find the first candidate match. */
	for (; isbranch(child); child = B->cbb_child[dir]) {
		uint32_t meta;
		unsigned byte;

		B = child2branch(child);
		(*C->cb_ops->cbo_membar_datadep_consumer)();
		meta = B->cbb_meta;
		DBG(B_ASSERT_MAYBE_INSERTED(B));
		byte = (getindex(meta) < keylen ? k[getindex(meta)] : 0);
		dir = (1 + (byte | getbits(meta))) >> 8;
	}

	/* Get the node. */
	assert(isleaf(child));
	N = child2leaf(child);
	(*C->cb_ops->cbo_membar_datadep_consumer)();
	DBG(N_ASSERT_MAYBE_INSERTED(N));
	k0 = critbit_key(C, N, &keylen0);

	/* Reject if the prefix doesn't match. */
	if (keylen0 < keylen)
		return NULL;
	for (i = 0; i < keylen; i++)
		if (k[i] != k0[i])
			return NULL;

	/* Success! */
	return N;
}

/* Insert */

struct critbit_node *
critbit_insert(struct critbit *C, struct critbit_node *N)
{
	size_t keylen, keylen0;
	const uint8_t *k, *k0;
	struct critbit_branch *B, *B0;
	volatile uintptr_t *childp;
	uintptr_t child;
	unsigned dir, byte = 0 /*XXXGCC*/, newdir, newbyte;
	struct critbit_node *N0;
	uint32_t i, diff, newbits;

	DBG(N_ASSERT_NOT_INSERTED(N));

	/* Get the key. */
	k = critbit_key(C, N, &keylen);
	assert(k != NULL);
	assert(keylen <= CRITBIT_MAX_KEYLEN);

restart:
	/* If the tree is empty, insert it at the root. */
	if ((child = C->cb_root) == 0) {
		DBG(N_INSERTED(N));
		(*C->cb_ops->cbo_membar_producer)();
		if (atomic_cas_uintptr(&C->cb_root, child, leaf2child(N))
		    != child)
			goto restart;
		DBG(N_ASSERT_MAYBE_INSERTED(N));
		return N;	/* inserted */
	}

	/* Find the first candidate match. */
	for (; isbranch(child); child = B->cbb_child[dir]) {
		uint32_t meta;

		B = child2branch(child);
		(*C->cb_ops->cbo_membar_datadep_consumer)();
		meta = B->cbb_meta;
		DBG(B_ASSERT_MAYBE_INSERTED(B));
		byte = (getindex(meta) < keylen ? k[getindex(meta)] : 0);
		dir = (1 + (byte | getbits(meta))) >> 8;
	}

	/* Get the candidate node. */
	assert(isleaf(child));
	N0 = child2leaf(child);
	DBG(N_ASSERT_MAYBE_INSERTED(N0));
	k0 = critbit_key(C, N0, &keylen0);

	/* Find the index of the critical byte. */
	for (i = 0; i < MIN(keylen, keylen0); i++) {
		newbyte = k[i];
		if ((diff = k0[i] ^ newbyte) != 0)
			goto branch;
	}
	if (keylen < keylen0) {
		/* new key is shorter */
		newbyte = 0;
		do {
			if ((diff = k0[i]) != 0)
				break;
		} while (++i < keylen0);
		if (i == keylen0)	/* no 1 bits in k0, fake one at end */
			diff = 0x80;
		goto branch;
	} else if (keylen0 < keylen) {
		/* existing key is shorter */
		do {
			newbyte = k[i];
			if ((diff = newbyte) != 0)
				break;
		} while (++i < keylen);
		if (i == keylen)	/* no 1 bits in k, fake one at end */
			diff = newbyte = 0x80;
		goto branch;
	}

	DBG(N_ASSERT_MAYBE_INSERTED(N0));
	return N0;		/* collision */

branch:
	/*
	 * Find the critical bit:
	 *
	 * . Set all bits from LSB up to the critical bit.
	 * . Clear all bits but the critical bit.
	 * . Complement -- set all bits but the critical bit.
	 *
	 * This way, newbits | b is 0xff iff b has 1 in the critical
	 * bit, so that (1 + (newbits | b)) >> 8 is 1 iff b has 1 in
	 * the critical bit.
	 */
	diff |= diff >> 1;
	diff |= diff >> 2;
	diff |= diff >> 4;
	diff ^= diff >> 1;
	newbits = diff ^ 0xff;

	/* Direction from parent in which to put the new item. */
	newdir = (1 + (newbits | newbyte)) >> 8;
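	/*
	 * Worked example: suppose the keys first differ at a byte
	 * where diff = k0[i] ^ k[i] = 0x2c.  Smearing the high bit
	 * down gives 0x3f; xoring with its own shift isolates the
	 * critical bit, 0x20; so newbits = 0x20 ^ 0xff = 0xdf.  A
	 * byte with the critical bit set, say 0x6d, gives
	 * 0xdf | 0x6d = 0xff and (1 + 0xff) >> 8 = 1; a byte with it
	 * clear, say 0x4d, gives 0xdf | 0x4d = 0xdf and
	 * (1 + 0xdf) >> 8 = 0.
	 */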
lock_restart:
	/* Find where to insert a new branch. */
	for (B0 = NULL, child = *(childp = &C->cb_root);
	     isbranch(child);
	     B0 = B, child = *(childp = &B->cbb_child[dir])) {
		uint32_t meta;

		B = child2branch(child);
		(*C->cb_ops->cbo_membar_datadep_consumer)();
		meta = B->cbb_meta;
		DBG(B_ASSERT_MAYBE_INSERTED(B));
		if (i < getindex(meta))
			break;
		if (i == getindex(meta) && newbits < getbits(meta))
			break;
		byte = (getindex(meta) < keylen ? k[getindex(meta)] : 0);
		dir = (1 + (byte | getbits(meta))) >> 8;
	}

	/*
	 * Lock the parent, if necessary.  If a deletion got in first,
	 * the parent may be going away, so try again to find where to
	 * put this.
	 *
	 * No need for a membar here: everything we need to load was
	 * already synchronized with membar_producer in other
	 * critbit_insert operations by the above
	 * membar_datadep_consumer, and the only public stores we issue
	 * are ordered by membar_producer below and then not published
	 * until atomic_cas.
	 */
	if (B0) {
		if (!branch_trylock(B0))
			goto lock_restart;
	}

	/* Initialize the new branch. */
	B = &N->cbn_branch;
	B->cbb_meta = makemeta(i, newbits);
	B->cbb_child[newdir] = leaf2child(N);
	B->cbb_child[1 - newdir] = child;

	/* Mark the node inserted. */
	DBG(N_INSERTED(N));

	/* Publish the new branch content to all CPUs. */
	(*C->cb_ops->cbo_membar_producer)();

	/*
	 * Insert it.  Note that there is a path up from N to its
	 * branch B.  This will remain so under future insertions
	 * because they never move subtrees around, and under future
	 * deletions because they recycle deleted branch structures
	 * only for higher branches.
	 */
	if (atomic_cas_uintptr(childp, child, branch2child(B)) != child) {
		if (B0)
			branch_unlock(B0);
		goto restart;
	}

	/* Unlock the parent. */
	if (B0)
		branch_unlock(B0);

	DBG(N_ASSERT_MAYBE_INSERTED(N));
	return N;		/* inserted */
}

/* Delete */
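/*
 * Deleting a leaf frees one branch structure -- the parent we just
 * unlinked -- but the node being deleted may still have its own
 * embedded branch wired into the tree on behalf of other nodes.  So
 * critbit_txn_delete_key unlinks the leaf immediately and queues the
 * pair (node to delete, spare branch) on the transaction; at commit,
 * after readers drain, the node's still-wired branch is copied into
 * the spare and the tree is swung over to the copy.
 */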
struct critbit_node *
critbit_txn_delete_key(struct critbit *C, const void *key, size_t keylen,
    struct critbit_txn_delete *T)
{
	const uint8_t *const k = key;
	struct critbit_branch *B = NULL;
	volatile uintptr_t *childp, *parentp = NULL;
	uintptr_t child;
	unsigned dir = 0 /*XXXGCC*/, byte;
	struct critbit_node *N;
	const uint8_t *k0;
	size_t keylen0;
	uint32_t i;
	struct critbit_txn_delete_1 *T1, *T1_B = NULL, *T1_N = NULL;

restart:
	/* If the tree is empty, do nothing. */
	if ((child = *(childp = &C->cb_root)) == 0)
		return NULL;

	/* Find the closest node. */
	for (; isbranch(child); child = *(childp = &B->cbb_child[dir])) {
		uint32_t meta;

		parentp = childp;
		B = child2branch(child);
		(*C->cb_ops->cbo_membar_datadep_consumer)();
		meta = B->cbb_meta;
		DBG(B_ASSERT_NOT_COMMITTED(B));
		byte = (getindex(meta) < keylen ? k[getindex(meta)] : 0);
		dir = (1 + (byte | getbits(meta))) >> 8;
	}

	/* Ignore it if it doesn't match. */
	assert(isleaf(child));
	N = child2leaf(child);
	DBG(N_ASSERT_NOT_COMMITTED(N));
	k0 = critbit_key(C, N, &keylen0);
	if (keylen0 != keylen)
		return NULL;
	for (i = 0; i < keylen; i++) {
		if (k0[i] != k[i])
			return NULL;
	}
	DBG(N_ASSERT_INSERTED(N));

	/* If the root was a leaf, just replace it by empty. */
	if (parentp == NULL) {
		assert(childp == &C->cb_root);
		assert(C->cb_root == leaf2child(N));
		if (atomic_cas_uintptr(&C->cb_root, child, 0) != child)
			goto restart;
		DBG(N_ASSERT_INSERTED(N));
		DBG(N_DELETED(N));
		return N;
	}

	/* Otherwise, replace it by the other child. */
	assert(B != NULL);
	assert(B->cbb_child[dir] == leaf2child(N));
	if (atomic_cas_uintptr(parentp, child, B->cbb_child[revdir(dir)])
	    != child)
		goto restart;

	DBG(struct critbit_txn_delete_1 *T1_ = NULL);
	DBG(N_PENDING(N));

	/* Search for overlapping deletion or recycling. */
	for (T1 = T->ctd_q; T1 < T->ctd_q + T->ctd_i; T1++) {
		/*
		 * Skip discarded records.  Any node left to be deleted
		 * here had better be in the PENDING state.
		 */
		assert((T1->ctd1_delete == NULL) ==
		    (T1->ctd1_recycle == NULL));
		if (T1->ctd1_delete == NULL)
			continue;
		DBG(N_ASSERT_PENDING(T1->ctd1_delete));

		/* Check whether we were going to delete B's node. */
		if (T1->ctd1_delete ==
		    container_of(B, struct critbit_node, cbn_branch)) {
			assert(T1_B == NULL);
			T1_B = T1;
		}

		/* Check whether someone was hoping to recycle N's branch. */
		if (T1->ctd1_recycle == &N->cbn_branch) {
			assert(T1_N == NULL);
			T1_N = T1;
		}
	}

	/* Decide what to do if we found overlapping deletion/recycling. */
	if (T1_B && T1_N) {
		/*
		 * The node of B is being deleted and recycled to some
		 * branch B', and some node N' is being deleted and
		 * recycled to the branch of N.  Recycle N' to B'
		 * instead, and forget about B and N.
		 *
		 * It is possible that the records coincide, if a node
		 * is being deleted and recycled (but not in the same
		 * step), in which case this has the effect of
		 * discarding the common record.
		 *
		 * If they do not coincide, then:
		 *
		 * (a) there is a path up from B' to B,
		 * (b) there is a path up from the branch of N to the
		 *     branch of N'.
		 *
		 * Since we know there is additionally a path up from B
		 * to the branch of N, there is a path up from B' to
		 * the branch of N'.
		 */
		assert(T1_B->ctd1_delete ==
		    container_of(B, struct critbit_node, cbn_branch));
		assert(T1_N->ctd1_recycle == &N->cbn_branch);
		assert((T1_N->ctd1_delete == T1_B->ctd1_delete) ==
		    (T1_N == T1_B));
		if (T1_N != T1_B) {
			DBG(N_ASSERT_PENDING(T1_B->ctd1_delete));
			DBG(N_ASSERT_PENDING(T1_N->ctd1_delete));
			DBG(N_DELETED(T1_B->ctd1_delete));
			T1_B->ctd1_delete = T1_N->ctd1_delete;
		} else {
			DBG(N_ASSERT_PENDING(T1_N->ctd1_delete));
			DBG(N_DELETED(T1_N->ctd1_delete));
		}
		T1_N->ctd1_delete = NULL;
		T1_N->ctd1_recycle = NULL;
		DBG(N_ASSERT_PENDING(N));
		DBG(N_DELETED(N));
		goto out;
	} else if (T1_B) {
		/*
		 * The node of B is being deleted and recycled to some
		 * branch B'.  Forget about B; recycle N to B'.
		 *
		 * If the node of B is being recycled to B', then there
		 * is a path up from the node of B via B' to B.  Since
		 * there is also a path up from N via B to the branch
		 * of N, there is a path up from B' to the branch of N.
		 */
		assert(T1_B->ctd1_delete ==
		    container_of(B, struct critbit_node, cbn_branch));
		DBG(N_ASSERT_PENDING(T1_B->ctd1_delete));
		DBG(N_DELETED(T1_B->ctd1_delete));
		T1_B->ctd1_delete = N;
		DBG(T1_ = T1_B);
		DBG(N_ASSERT_PENDING(N));
		goto out;
	} else if (T1_N) {
		/*
		 * Some node N' is being deleted and recycled to the
		 * branch of N.  Forget about N; recycle N' to B.
		 *
		 * If N' is being recycled to the branch of N, then
		 * there is a path up from the branch of N to the
		 * branch of N'.  Since there is also a path up from N
		 * via B to the branch of N, there is a path up from B
		 * to the branch of N'.
		 */
		assert(T1_N->ctd1_recycle == &N->cbn_branch);
		DBG(N_ASSERT_PENDING(N));
		DBG(N_DELETED(N));
		DBG(N_ASSERT_PENDING(T1_N->ctd1_delete));
		T1_N->ctd1_recycle = B;
		goto out;
	}

	/* If we removed N's branch, we're done. */
	if (B == &N->cbn_branch) {
		DBG(N_ASSERT_PENDING(N));
		DBG(N_DELETED(N));
		goto out;
	}

	/*
	 * Otherwise, queue B up for recycling so we can delete N.
	 * There is guaranteed to be a path up from B to the branch of
	 * N if the branch of N is in use.  Thus recycled branches only
	 * ever move up in the tree, which means their nodes still have
	 * paths up to them.
	 */
	assert(T->ctd_i < T->ctd_n);
	T1 = &T->ctd_q[T->ctd_i++];
	T1->ctd1_delete = N;
	T1->ctd1_recycle = B;
	DBG(N_ASSERT_PENDING(N));
	DBG(T1_ = T1);

out:
	/*
	 * - T1_ is N's entry in the queue, if any.
	 * - N is pending or deleted at this point.
	 * - N is pending iff it appears in the queue.
	 * - N is deleted iff it does not appear in the queue.
	 */
	DBG(assert(T1_ == NULL || T1_->ctd1_delete == N));
	DBG(if (T1_ != NULL) N_ASSERT_PENDING(N));
	DBG(if (T1_ == NULL) N_ASSERT_DELETED(N));

	return N;
}

/* Delete transaction */

void
critbit_txn_delete_begin(struct critbit *C __unused,
    struct critbit_txn_delete *T, struct critbit_txn_delete_1 *T1, size_t n)
{

	T->ctd_q = T1;
	T->ctd_i = 0;
	T->ctd_n = n;
}
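/*
 * Commit works in two rounds of cbo_sync: one wait so that no reader
 * still holds a pointer into a spare branch we are about to
 * overwrite, after which each deleted node's still-wired branch is
 * copied into its spare and published; and a second wait so that no
 * reader still holds a pointer to any deleted node before the nodes
 * are marked reusable by their owners.
 */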
void
critbit_txn_delete_commit(struct critbit *C, struct critbit_txn_delete *T)
{
	size_t i;

	/* Wait for all users of branches we may recycle to drain. */
	(*C->cb_ops->cbo_sync)(C);

	/*
	 * For each node N to delete, find whether its branch is still
	 * in use.  If it is, recycle its chosen extant branch for it.
	 */
	for (i = 0; i < T->ctd_i; i++) {
		const struct critbit_txn_delete_1 *const T1 = &T->ctd_q[i];
		struct critbit_node *const N = T1->ctd1_delete;
		struct critbit_branch *B = NULL;
		const uint8_t *k;
		size_t keylen;
		volatile uintptr_t *childp;
		uintptr_t child;
		unsigned dir, byte;

		if (N == NULL)
			continue;
		DBG(N_ASSERT_PENDING(N));

		/* Find where N is. */
		k = critbit_key(C, N, &keylen);
		for (child = *(childp = &C->cb_root);
		     isbranch(child);
		     child = *(childp = &B->cbb_child[dir])) {
			uint32_t meta;

			B = child2branch(child);
			(*C->cb_ops->cbo_membar_datadep_consumer)();
			DBG(B_ASSERT_NOT_COMMITTED(B));
			if (B == &N->cbn_branch)
				break;
			meta = B->cbb_meta;
			byte = (getindex(meta) < keylen ?
			    k[getindex(meta)] : 0);
			dir = (1 + (byte | getbits(meta))) >> 8;
		}
		assert(childp != NULL);

		/* Didn't find it, so no need to recycle. */
		if (B != &N->cbn_branch)
			continue;

		/*
		 * Lock the branch we're about to snapshot and delete.
		 * Nobody else can be locking it because we're the only
		 * ones deleting the node.  No need to unlock because
		 * we're done with the node afterward -- nobody else
		 * can haz it.
		 */
		branch_lock(B);

		/* Found it at childp.  Recycle (but preserve magic). */
#if CRITBIT_DEBUG
		memcpy(T1->ctd1_recycle, B,
		    offsetof(struct critbit_branch, cbb_magic));
#else
		*T1->ctd1_recycle = *B;
#endif

		/* Guarantee it is initialized before we publish it. */
		(*C->cb_ops->cbo_membar_producer)();

		/* Publish the recycled branch. */
		*childp = branch2child(T1->ctd1_recycle);
	}

	/* Wait for all users of deleted nodes to drain. */
	(*C->cb_ops->cbo_sync)(C);

	/* Mark all of the deleted nodes as newly initialized. */
	for (i = 0; i < T->ctd_i; i++) {
		const struct critbit_txn_delete_1 *const T1 = &T->ctd_q[i];
		struct critbit_node *const N = T1->ctd1_delete;

		if (N == NULL)
			continue;
		DBG(N_ASSERT_PENDING(N));
		DBG(N_INIT(N));
	}
}

/* Delete utilities */

void
critbit_txn_delete(struct critbit *C, struct critbit_node *N,
    struct critbit_txn_delete *T)
{
	struct critbit_node *N0;
	const void *key;
	size_t keylen;

	/* Get the key. */
	key = critbit_key(C, N, &keylen);

	/*
	 * Delete by the key.  Without parent pointers, we can do no
	 * better than this.
	 */
	N0 = critbit_txn_delete_key(C, key, keylen, T);

	/* The node had better be the same one. */
	assert(N0 == N);
}

struct critbit_node *
critbit_delete_key(struct critbit *C, const void *key, size_t keylen)
{
	struct critbit_txn_delete_1 txn1, *T1 = &txn1;
	struct critbit_txn_delete txn, *T = &txn;
	struct critbit_node *N;

	/* Delete the key in a single-operation transaction. */
	critbit_txn_delete_begin(C, T, T1, 1);
	N = critbit_txn_delete_key(C, key, keylen, T);
	critbit_txn_delete_commit(C, T);

	DBG(if (N != NULL) N_ASSERT_NOT_INSERTED(N));
	return N;
}

void
critbit_delete(struct critbit *C, struct critbit_node *N)
{
	struct critbit_txn_delete_1 txn1, *T1 = &txn1;
	struct critbit_txn_delete txn, *T = &txn;

	/* Delete the node in a single-operation transaction. */
	critbit_txn_delete_begin(C, T, T1, 1);
	critbit_txn_delete(C, N, T);
	critbit_txn_delete_commit(C, T);

	DBG(N_ASSERT_NOT_INSERTED(N));
}
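/*
 * Example (sketch): batching several deletions into one transaction
 * pays for the two cbo_sync waits only once.  The queue size and the
 * keys/keylens arrays here are hypothetical.
 *
 *	struct critbit_txn_delete_1 q[4];
 *	struct critbit_txn_delete txn;
 *	size_t i;
 *
 *	critbit_txn_delete_begin(C, &txn, q, 4);
 *	for (i = 0; i < 4; i++)
 *		(void)critbit_txn_delete_key(C, keys[i], keylens[i], &txn);
 *	critbit_txn_delete_commit(C, &txn);
 */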
/* Iteration */

void
critbit_iter_init(struct critbit *C, struct critbit_iter *I, void *stack,
    size_t stacklen)
{

	assert(stacklen <= CRITBIT_MAX_KEYLEN);

	I->cbi_critbit = C;
	I->cbi_stack = stack;
	I->cbi_stacklen = stacklen;
	I->cbi_depth = 0;
}

void
critbit_iter_destroy(struct critbit *C __diagused, struct critbit_iter *I)
{

	assert(I->cbi_critbit == C);
	assert(I->cbi_stack != NULL);

	I->cbi_critbit = NULL;
	I->cbi_stack = NULL;
	I->cbi_stacklen = 0;
	I->cbi_depth = UINT_MAX;
}

static struct critbit_node *
critbit_iter_extreme(struct critbit *C, struct critbit_iter *I, unsigned dir)
{
	unsigned char *S = I->cbi_stack;
	const unsigned maxdepth __diagused = CHAR_BIT*I->cbi_stacklen;
	struct critbit_branch *B;
	uintptr_t child;
	unsigned d;

	assert(I->cbi_critbit == C);
	assert(I->cbi_depth == 0);

	/* If there's nothing in the tree, we're done. */
	if (C->cb_root == 0)
		return NULL;

	/* Move down and in the designated direction until we hit a leaf. */
	for (d = 0, child = C->cb_root;
	     assert(d < maxdepth), isbranch(child);
	     d++, child = B->cbb_child[dir]) {
		B = child2branch(child);
		setdir(S, d, dir);
	}

	/* Mark where we were and return the node. */
	assert(d < maxdepth);
	I->cbi_depth = d;
	return child2leaf(child);
}

struct critbit_node *
critbit_iter_min(struct critbit *C, struct critbit_iter *I)
{

	return critbit_iter_extreme(C, I, 0);
}

struct critbit_node *
critbit_iter_max(struct critbit *C, struct critbit_iter *I)
{

	return critbit_iter_extreme(C, I, 1);
}

static struct critbit_node *
critbit_iter_adjacent(struct critbit *C, struct critbit_iter *I, unsigned dir)
{
	unsigned char *S = I->cbi_stack;
	const unsigned maxdepth __diagused = CHAR_BIT*I->cbi_stacklen;
	struct critbit_branch *B;
	unsigned d0, d, dir0;
	uintptr_t child;

	assert(I->cbi_critbit == C);

	/* Move back up until we can move forward. */
	for (d0 = I->cbi_depth; d0 --> 0;) {
		/* Can we move forward? */
		if (getdir(S, d0) != dir) {
			/* Yes.  First, make one step forward. */
			setdir(S, d0++, dir);

			/* Next, follow the path down the stack. */
			for (d = 0, child = C->cb_root;
			     d < d0;
			     d++, child = B->cbb_child[dir0]) {
				assert(isbranch(child));
				B = child2branch(child);
				dir0 = getdir(S, d);
			}

			/* Now move down and in reverse as far as we can. */
			for (;
			     assert(d < maxdepth), isbranch(child);
			     d++, child = B->cbb_child[revdir(dir)]) {
				B = child2branch(child);
				setdir(S, d, revdir(dir));
			}

			/* Stop here.  We are done. */
			assert(d < maxdepth);
			I->cbi_depth = d;
			return child2leaf(child);
		}
	}

	/* There is nothing more.  Note that we are done. */
	I->cbi_depth = 0;
	return NULL;
}

struct critbit_node *
critbit_iter_next(struct critbit *C, struct critbit_iter *I)
{

	return critbit_iter_adjacent(C, I, 1);
}

struct critbit_node *
critbit_iter_prev(struct critbit *C, struct critbit_iter *I)
{

	return critbit_iter_adjacent(C, I, 0);
}
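/*
 * Example (sketch): in-order traversal with a caller-supplied
 * direction stack.  One bit per tree level suffices, so
 * CRITBIT_MAX_KEYLEN bytes cover the deepest possible path.  The
 * visit() callback is hypothetical.
 *
 *	unsigned char stack[CRITBIT_MAX_KEYLEN];
 *	struct critbit_iter iter;
 *	struct critbit_node *n;
 *
 *	critbit_iter_init(C, &iter, stack, sizeof(stack));
 *	for (n = critbit_iter_min(C, &iter); n != NULL;
 *	     n = critbit_iter_next(C, &iter))
 *		visit(n);
 *	critbit_iter_destroy(C, &iter);
 */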
/* Consistency check */

static void
critbit_check_leaf(const struct critbit *C, const struct critbit_node *N)
{
	const uint8_t *k;
	size_t keylen;
	const struct critbit_branch *B;
	uintptr_t child;
	unsigned dir;

	DBG(N_ASSERT_INSERTED(N));
	k = critbit_key(C, N, &keylen);
	assert(C->cb_root != 0);
	for (child = C->cb_root; isbranch(child); child = B->cbb_child[dir]) {
		uint32_t meta;
		unsigned byte;

		B = child2branch(child);
		meta = B->cbb_meta;
		DBG(B_ASSERT_INSERTED(B));
		byte = (getindex(meta) < keylen ? k[getindex(meta)] : 0);
		dir = (1 + (byte | getbits(meta))) >> 8;
	}
	assert(isleaf(child));
	assert(N == child2leaf(child));
}

static void
critbit_check_child(struct critbit *C, uintptr_t child, size_t idx __diagused)
{

	if (isbranch(child)) {
		struct critbit_branch *B = child2branch(child);
		uint32_t meta = B->cbb_meta;

		assert(idx <= getindex(meta));
		DBG(B_ASSERT_INSERTED(B));
		critbit_check_child(C, B->cbb_child[0], getindex(meta));
		critbit_check_child(C, B->cbb_child[1], getindex(meta));
	} else {
		critbit_check_leaf(C, child2leaf(child));
	}
}

void
critbit_check(struct critbit *C)
{

	if (C->cb_root == 0)
		return;
	critbit_check_child(C, C->cb_root, 0);
}

/* Print */

/*
 * WARNING: This debugging operation requires that the node's key be a
 * NUL-terminated printable string.
 */

#include <stdio.h>		/* XXX header names garbled; printf */
#include <strings.h>		/* XXX header names garbled; ffs */

static void
critbit_print_child(struct critbit *C, uintptr_t child, unsigned indent)
{

	if (isbranch(child)) {
		struct critbit_branch *const B = child2branch(child);
		uint32_t meta = B->cbb_meta;
		uint16_t index = getindex(meta);
		uint8_t bits = getbits(meta);

		printf("%*sbranch on 8*%u + %u (0x%02x) @ %p\n", indent*2, "",
		    index, 8 - ffs(0xff^bits), 0xff^bits, B);
		critbit_print_child(C, B->cbb_child[0], indent + 1);
		critbit_print_child(C, B->cbb_child[1], indent + 1);
	} else {
		struct critbit_node *const N = child2leaf(child);
		size_t keylen __unused;
		const uint8_t *const k = critbit_key(C, N, &keylen);

		printf("%*sleaf @ %p: %s\n", indent*2, "", N, k);
	}
}

void
critbit_print(struct critbit *C)
{

	if (C->cb_root == 0) {
		printf("empty\n");
		return;
	}
	critbit_print_child(C, C->cb_root, 0);
}
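/*
 * Example (sketch): a userland smoke test, assuming the hypothetical
 * record type and record_ops sketched above near critbit_key, with
 * NUL-terminated string keys so that critbit_print is usable.
 *
 *	struct critbit C;
 *	struct record a = { .r_key = "foo" }, b = { .r_key = "bar" };
 *
 *	critbit_init(&C, &record_ops);
 *	critbit_node_init(&a.r_node, a.r_key, strlen(a.r_key));
 *	critbit_node_init(&b.r_node, b.r_key, strlen(b.r_key));
 *	(void)critbit_insert(&C, &a.r_node);
 *	(void)critbit_insert(&C, &b.r_node);
 *	critbit_check(&C);
 *	critbit_print(&C);
 */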