// hibitset/atomic.rs

1use std::{
2    default::Default,
3    fmt::{Debug, Error as FormatError, Formatter},
4    iter::repeat,
5    sync::atomic::{AtomicUsize, Ordering},
6};
7
8use atom::AtomSetOnce;
9
10use crate::{util::*, BitSetLike, DrainableBitSet};
11
/// This is similar to a [`BitSet`] but allows setting of value
/// without unique ownership of the structure
///
/// An `AtomicBitSet` has the ability to add an item to the set
/// without unique ownership (given that the set is big enough).
/// Removing elements does require unique ownership as an effect
/// of the hierarchy it holds. Worst case multiple writers set the
/// same bit twice (but only one of them is reported as having set it).
///
/// It is possible to atomically remove from the set, but not at the
/// same time as atomically adding. This is because there is no way
/// to know if layer 1-3 would be left in a consistent state if they are
/// being cleared and set at the same time.
///
/// `AtomicBitSet` resolves this race by disallowing atomic
/// clearing of bits.
///
/// [`BitSet`]: ../struct.BitSet.html
#[derive(Debug)]
pub struct AtomicBitSet {
    // Topmost summary word: a bit is set here when the corresponding
    // `layer2` word is non-zero (see `remove`, which clears it when the
    // layer-2 word becomes empty).
    layer3: AtomicUsize,
    // Middle summary words: a bit is set when the corresponding
    // `layer1` block holds at least one bit.
    layer2: Vec<AtomicUsize>,
    // Leaf blocks; each carries its own layer-1 summary mask plus a
    // lazily-allocated array of layer-0 words.
    layer1: Vec<AtomicBlock>,
}
36
impl AtomicBitSet {
    /// Creates an empty `AtomicBitSet`.
    pub fn new() -> AtomicBitSet { Default::default() }

    /// Adds `id` to the `AtomicBitSet`. Returns `true` if the value was
    /// already in the set.
    ///
    /// Because we cannot safely extend an AtomicBitSet without unique ownership
    /// this will panic if the Index is out of range.
    #[inline]
    pub fn add_atomic(&self, id: Index) -> bool {
        let (_, p1, p2) = offsets(id);

        // While it is tempting to check if the bit was set and exit here if it
        // was, this can result in a data race. If this thread and another
        // thread both set the same bit it is possible for the second thread
        // to exit before l3 was set. Resulting in the iterator to be in an
        // incorrect state. The window is small, but it exists.
        let set = self.layer1[p1].add(id);
        // Unconditionally propagate the summary bits up layers 2 and 3 so
        // iterators descending from the top can find this block.
        self.layer2[p2].fetch_or(id.mask(SHIFT2), Ordering::Relaxed);
        self.layer3.fetch_or(id.mask(SHIFT3), Ordering::Relaxed);
        set
    }

    /// Adds `id` to the `BitSet`. Returns `true` if the value was
    /// already in the set.
    #[inline]
    pub fn add(&mut self, id: Index) -> bool {
        use std::sync::atomic::Ordering::Relaxed;

        let (_, p1, p2) = offsets(id);
        if self.layer1[p1].add(id) {
            // The bit was already present, so the summary bits in the
            // layers above must already be set; nothing more to do.
            return true;
        }

        // `&mut self` gives exclusive access, so plain load/store pairs
        // are sound here in place of the `fetch_or` used by `add_atomic`.
        self.layer2[p2]
            .store(self.layer2[p2].load(Relaxed) | id.mask(SHIFT2), Relaxed);
        self.layer3
            .store(self.layer3.load(Relaxed) | id.mask(SHIFT3), Relaxed);
        false
    }

    /// Removes `id` from the set, returns `true` if the value
    /// was removed, and `false` if the value was not set
    /// to begin with.
    #[inline]
    pub fn remove(&mut self, id: Index) -> bool {
        use std::sync::atomic::Ordering::Relaxed;
        let (_, p1, p2) = offsets(id);

        // if the bitmask was set we need to clear
        // its bit from layer0 to 3. the layers above only
        // should be cleared if the bit cleared was the last bit
        // in its set
        //
        // These are used over a `fetch_and` because we have a mutable
        // access to the AtomicBitSet so this is sound (and faster)
        if !self.layer1[p1].remove(id) {
            return false;
        }
        // The block still holds other bits: the upper summaries stay set.
        if self.layer1[p1].mask.load(Ordering::Relaxed) != 0 {
            return true;
        }

        // The block is now empty: clear its layer-2 summary bit, and if
        // that word also became empty, clear the layer-3 bit as well.
        let v = self.layer2[p2].load(Relaxed) & !id.mask(SHIFT2);
        self.layer2[p2].store(v, Relaxed);
        if v != 0 {
            return true;
        }

        let v = self.layer3.load(Relaxed) & !id.mask(SHIFT3);
        self.layer3.store(v, Relaxed);
        return true;
    }

    /// Returns `true` if `id` is in the set.
    #[inline]
    pub fn contains(&self, id: Index) -> bool {
        // Index of the leaf block holding this id's layer-0 bit.
        let i = id.offset(SHIFT2);
        self.layer1[i].contains(id)
    }

    /// Clear all bits in the set
    pub fn clear(&mut self) {
        // This is the same hierarchical-striding used in the iterators.
        // Using this technique we can avoid clearing segments of the bitset
        // that are already clear. In the best case when the set is already
        // cleared, this will only touch the highest layer.

        // `m3`/`m2` are the not-yet-visited bits of the current layer-3 and
        // layer-2 words; each layer word is swapped to zero as it is read.
        let (mut m3, mut m2) = (self.layer3.swap(0, Ordering::Relaxed), 0usize);
        let mut offset = 0;

        loop {
            if m2 != 0 {
                let bit = m2.trailing_zeros() as usize;
                m2 &= !(1 << bit);

                // layer 1 & 0 are cleared unconditionally. it's only 32-64
                // words and the extra logic to select the
                // correct works is slower then just clearing
                // them all.
                self.layer1[offset + bit].clear();
                continue;
            }

            if m3 != 0 {
                // Descend into the next non-empty layer-2 word; `offset` is
                // the base index of its layer-1 blocks.
                let bit = m3.trailing_zeros() as usize;
                m3 &= !(1 << bit);
                offset = bit << BITS;
                m2 = self.layer2[bit].swap(0, Ordering::Relaxed);
                continue;
            }
            break;
        }
    }
}
153
154impl BitSetLike for AtomicBitSet {
155    #[inline]
156    fn layer3(&self) -> usize { self.layer3.load(Ordering::Relaxed) }
157
158    #[inline]
159    fn layer2(&self, i: usize) -> usize {
160        self.layer2[i].load(Ordering::Relaxed)
161    }
162
163    #[inline]
164    fn layer1(&self, i: usize) -> usize {
165        self.layer1[i].mask.load(Ordering::Relaxed)
166    }
167
168    #[inline]
169    fn layer0(&self, i: usize) -> usize {
170        let (o1, o0) = (i >> BITS, i & ((1 << BITS) - 1));
171        self.layer1[o1]
172            .atom
173            .get()
174            .map(|l0| l0[o0].load(Ordering::Relaxed))
175            .unwrap_or(0)
176    }
177
178    #[inline]
179    fn contains(&self, i: Index) -> bool { self.contains(i) }
180}
181
182impl DrainableBitSet for AtomicBitSet {
183    #[inline]
184    fn remove(&mut self, i: Index) -> bool { self.remove(i) }
185}
186
187impl Default for AtomicBitSet {
188    fn default() -> Self {
189        AtomicBitSet {
190            layer3: Default::default(),
191            layer2: repeat(0)
192                .map(|_| AtomicUsize::new(0))
193                .take(1 << BITS)
194                .collect(),
195            layer1: repeat(0)
196                .map(|_| AtomicBlock::new())
197                .take(1 << (2 * BITS))
198                .collect(),
199        }
200    }
201}
202
// A single leaf block of the hierarchy: a layer-1 summary word plus a
// lazily-allocated array of layer-0 words.
struct AtomicBlock {
    // Summary mask over the layer-0 words below; maintained by
    // `add`/`remove`/`clear`.
    mask: AtomicUsize,
    // Layer-0 storage, allocated on first `add` (see `AtomicBlock::add`).
    atom: AtomSetOnce<Box<[AtomicUsize; 1 << BITS]>>,
}
207
impl AtomicBlock {
    /// Creates a block with an empty summary mask and no layer-0
    /// storage; the word array is allocated on first `add`.
    fn new() -> AtomicBlock {
        AtomicBlock {
            mask: AtomicUsize::new(0),
            atom: AtomSetOnce::empty(),
        }
    }

    /// Sets the bit for `id`, allocating the layer-0 words if needed.
    /// Returns `true` if the bit was already set.
    fn add(&self, id: Index) -> bool {
        if self.atom.is_none() {
            const ZERO: AtomicUsize = AtomicUsize::new(0);
            let v = Box::new([ZERO; 1 << BITS]);
            // NOTE(review): relies on `AtomSetOnce::set_if_none` keeping
            // only the first value when threads race here — the losing
            // allocation is presumably dropped; see the `atom` crate docs.
            self.atom.set_if_none(v);
        }

        let (i, m) = (id.row(SHIFT1), id.mask(SHIFT0));
        // Set the layer-0 bit first, then the summary bit, so the mask
        // never advertises a word that has not been written yet.
        let old = self.atom.get().unwrap()[i].fetch_or(m, Ordering::Relaxed);
        self.mask.fetch_or(id.mask(SHIFT1), Ordering::Relaxed);
        old & m != 0
    }

    /// Returns `true` if the bit for `id` is set. A block whose layer-0
    /// words were never allocated contains nothing.
    fn contains(&self, id: Index) -> bool {
        self.atom
            .get()
            .map(|l0| {
                l0[id.row(SHIFT1)].load(Ordering::Relaxed) & id.mask(SHIFT0)
                    != 0
            })
            .unwrap_or(false)
    }

    /// Clears the bit for `id`; returns `true` if it was previously set.
    /// `&mut self` makes the plain load/store pairs sound here.
    fn remove(&mut self, id: Index) -> bool {
        if let Some(l0) = self.atom.get_mut() {
            let (i, m) = (id.row(SHIFT1), !id.mask(SHIFT0));
            let v = l0[i].load(Ordering::Relaxed);
            l0[i].store(v & m, Ordering::Relaxed);
            // If the whole word became empty, drop it from the summary mask.
            if v & m == 0 {
                let v = self.mask.load(Ordering::Relaxed) & !id.mask(SHIFT1);
                self.mask.store(v, Ordering::Relaxed);
            }
            v & id.mask(SHIFT0) == id.mask(SHIFT0)
        } else {
            false
        }
    }

    /// Zeroes the summary mask and every allocated layer-0 word; the
    /// allocation itself is kept for reuse.
    fn clear(&mut self) {
        self.mask.store(0, Ordering::Relaxed);
        self.atom.get().map(|l0| {
            for l in &l0[..] {
                l.store(0, Ordering::Relaxed);
            }
        });
    }
}
263
264impl Debug for AtomicBlock {
265    fn fmt(&self, f: &mut Formatter) -> Result<(), FormatError> {
266        f.debug_struct("AtomicBlock")
267            .field("mask", &self.mask)
268            .field("atom", &self.atom.get().unwrap().iter())
269            .finish()
270    }
271}
272
#[cfg(test)]
mod atomic_set_test {
    use crate::{AtomicBitSet, BitSetAnd, BitSetLike};

    #[test]
    fn insert() {
        let mut set = AtomicBitSet::new();
        for id in 0..1_000 {
            // First insertion reports "not previously present",
            // the second reports "already present".
            assert!(!set.add(id));
            assert!(set.add(id));
        }

        assert!((0..1_000).all(|id| set.contains(id)));
    }

    #[test]
    fn insert_100k() {
        let mut set = AtomicBitSet::new();
        for id in 0..100_000 {
            assert!(!set.add(id));
            assert!(set.add(id));
        }

        assert!((0..100_000).all(|id| set.contains(id)));
    }

    #[test]
    fn add_atomic() {
        // Same contract as `add`, but through the `&self` entry point.
        let set = AtomicBitSet::new();
        for id in 0..1_000 {
            assert!(!set.add_atomic(id));
            assert!(set.add_atomic(id));
        }

        assert!((0..1_000).all(|id| set.contains(id)));
    }

    #[test]
    fn add_atomic_100k() {
        let set = AtomicBitSet::new();
        for id in 0..100_000 {
            assert!(!set.add_atomic(id));
            assert!(set.add_atomic(id));
        }

        assert!((0..100_000).all(|id| set.contains(id)));
    }

    #[test]
    fn remove() {
        let mut set = AtomicBitSet::new();
        for id in 0..1_000 {
            assert!(!set.add(id));
        }

        for id in 0..1_000 {
            // Present before removal, gone afterwards, and a second
            // removal reports "was not set".
            assert!(set.contains(id));
            assert!(set.remove(id));
            assert!(!set.contains(id));
            assert!(!set.remove(id));
        }
    }

    #[test]
    fn iter() {
        let mut set = AtomicBitSet::new();
        for id in 0..100_000 {
            set.add(id);
        }

        // Iteration yields every inserted index in ascending order.
        let mut seen = 0;
        for (expected, actual) in set.iter().enumerate() {
            assert_eq!(expected, actual as usize);
            seen += 1;
        }
        assert_eq!(seen, 100_000);
    }

    #[test]
    fn iter_odd_even() {
        let mut odd = AtomicBitSet::new();
        let mut even = AtomicBitSet::new();
        for id in 0..100_000 {
            // Route each index to the set matching its parity.
            let target = if id % 2 == 1 { &mut odd } else { &mut even };
            target.add(id);
        }

        assert_eq!((&odd).iter().count(), 50_000);
        assert_eq!((&even).iter().count(), 50_000);
        // Disjoint sets intersect to nothing.
        assert_eq!(BitSetAnd(&odd, &even).iter().count(), 0);
    }

    #[test]
    fn clear() {
        // Inserts `count` indices spaced `stride` apart.
        fn refill(set: &mut AtomicBitSet, count: u32, stride: u32) {
            for i in 0..count {
                set.add(i * stride);
            }
        }

        let mut set = AtomicBitSet::new();
        refill(&mut set, 1_000, 1);

        // sum of 0..=999
        assert_eq!((&set).iter().sum::<u32>(), 500_500 - 1_000);

        assert_eq!((&set).iter().count(), 1_000);
        set.clear();
        assert_eq!((&set).iter().count(), 0);

        // Repeat with ever sparser strides so `clear` has to walk
        // different hierarchy shapes.
        refill(&mut set, 1_000, 64);
        assert_eq!((&set).iter().count(), 1_000);
        set.clear();
        assert_eq!((&set).iter().count(), 0);

        refill(&mut set, 1_000, 1_000);
        assert_eq!((&set).iter().count(), 1_000);
        set.clear();
        assert_eq!((&set).iter().count(), 0);

        refill(&mut set, 100, 10_000);
        assert_eq!((&set).iter().count(), 100);
        set.clear();
        assert_eq!((&set).iter().count(), 0);

        refill(&mut set, 10, 10_000);
        assert_eq!((&set).iter().count(), 10);
        set.clear();
        assert_eq!((&set).iter().count(), 0);
    }
}
421}