// rustc_index/bit_set.rs

1use std::marker::PhantomData;
2#[cfg(not(feature = "nightly"))]
3use std::mem;
4use std::ops::{BitAnd, BitAndAssign, BitOrAssign, Bound, Not, Range, RangeBounds, Shl};
5use std::rc::Rc;
6use std::{fmt, iter, slice};
7
8use Chunk::*;
9#[cfg(feature = "nightly")]
10use rustc_macros::{Decodable_NoContext, Encodable_NoContext};
11use smallvec::{SmallVec, smallvec};
12
13use crate::{Idx, IndexVec};
14
15#[cfg(test)]
16mod tests;
17
18type Word = u64;
19const WORD_BYTES: usize = size_of::<Word>();
20const WORD_BITS: usize = WORD_BYTES * 8;
21
22// The choice of chunk size has some trade-offs.
23//
24// A big chunk size tends to favour cases where many large `ChunkedBitSet`s are
25// present, because they require fewer `Chunk`s, reducing the number of
26// allocations and reducing peak memory usage. Also, fewer chunk operations are
27// required, though more of them might be `Mixed`.
28//
29// A small chunk size tends to favour cases where many small `ChunkedBitSet`s
30// are present, because less space is wasted at the end of the final chunk (if
31// it's not full).
32const CHUNK_WORDS: usize = 32;
33const CHUNK_BITS: usize = CHUNK_WORDS * WORD_BITS; // 2048 bits
34
35/// ChunkSize is small to keep `Chunk` small. The static assertion ensures it's
36/// not too small.
37type ChunkSize = u16;
38const _: () = assert!(CHUNK_BITS <= ChunkSize::MAX as usize);
39
40pub trait BitRelations<Rhs> {
41    fn union(&mut self, other: &Rhs) -> bool;
42    fn subtract(&mut self, other: &Rhs) -> bool;
43    fn intersect(&mut self, other: &Rhs) -> bool;
44}
45
46#[inline]
47fn inclusive_start_end<T: Idx>(
48    range: impl RangeBounds<T>,
49    domain: usize,
50) -> Option<(usize, usize)> {
51    // Both start and end are inclusive.
52    let start = match range.start_bound().cloned() {
53        Bound::Included(start) => start.index(),
54        Bound::Excluded(start) => start.index() + 1,
55        Bound::Unbounded => 0,
56    };
57    let end = match range.end_bound().cloned() {
58        Bound::Included(end) => end.index(),
59        Bound::Excluded(end) => end.index().checked_sub(1)?,
60        Bound::Unbounded => domain - 1,
61    };
62    assert!(end < domain);
63    if start > end {
64        return None;
65    }
66    Some((start, end))
67}
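
// Illustrative sketch (added commentary, not part of the original source): how
// `inclusive_start_end` normalizes the different `RangeBounds` forms. Assumes
// the `Idx` impl for `usize` provided by this crate.
#[cfg(test)]
mod inclusive_start_end_example {
    use super::*;

    #[test]
    fn bounds_are_normalized_to_inclusive_pairs() {
        // A half-open range excludes its end, so the inclusive end is 6.
        assert_eq!(inclusive_start_end(3usize..7, 10), Some((3, 6)));
        // An inclusive range is returned as-is.
        assert_eq!(inclusive_start_end(0usize..=9, 10), Some((0, 9)));
        // An empty range yields `None`.
        assert_eq!(inclusive_start_end(5usize..5, 10), None);
    }
}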
68
69macro_rules! bit_relations_inherent_impls {
70    () => {
71        /// Sets `self = self | other` and returns `true` if `self` changed
72        /// (i.e., if new bits were added).
73        pub fn union<Rhs>(&mut self, other: &Rhs) -> bool
74        where
75            Self: BitRelations<Rhs>,
76        {
77            <Self as BitRelations<Rhs>>::union(self, other)
78        }
79
80        /// Sets `self = self - other` and returns `true` if `self` changed
81        /// (i.e., if any bits were removed).
82        pub fn subtract<Rhs>(&mut self, other: &Rhs) -> bool
83        where
84            Self: BitRelations<Rhs>,
85        {
86            <Self as BitRelations<Rhs>>::subtract(self, other)
87        }
88
89        /// Sets `self = self & other` and returns `true` if `self` changed
90        /// (i.e., if any bits were removed).
91        pub fn intersect<Rhs>(&mut self, other: &Rhs) -> bool
92        where
93            Self: BitRelations<Rhs>,
94        {
95            <Self as BitRelations<Rhs>>::intersect(self, other)
96        }
97    };
98}
99
100/// A fixed-size bitset type with a dense representation.
101///
102/// Note 1: Since this bitset is dense, if your domain is big, and/or relatively
103/// homogeneous (for example, with long runs of bits set or unset), then it may
104/// be preferable to instead use a [MixedBitSet], or an
105/// [IntervalSet](crate::interval::IntervalSet). They should be more suited to
106/// sparse, or highly-compressible, domains.
107///
108/// Note 2: Use [`GrowableBitSet`] if you need support for resizing after creation.
109///
110/// `T` is an index type, typically a newtyped `usize` wrapper, but it can also
111/// just be `usize`.
112///
113/// All operations that involve an element will panic if the element is equal
114/// to or greater than the domain size. All operations that involve two bitsets
115/// will panic if the bitsets have differing domain sizes.
116///
117#[cfg_attr(feature = "nightly", derive(Decodable_NoContext, Encodable_NoContext))]
118#[derive(Eq, PartialEq, Hash)]
119pub struct DenseBitSet<T> {
120    domain_size: usize,
121    words: SmallVec<[Word; 2]>,
122    marker: PhantomData<T>,
123}
124
125impl<T> DenseBitSet<T> {
126    /// Gets the domain size.
127    pub fn domain_size(&self) -> usize {
128        self.domain_size
129    }
130}
131
132impl<T: Idx> DenseBitSet<T> {
133    /// Creates a new, empty bitset with a given `domain_size`.
134    #[inline]
135    pub fn new_empty(domain_size: usize) -> DenseBitSet<T> {
136        let num_words = num_words(domain_size);
137        DenseBitSet { domain_size, words: smallvec![0; num_words], marker: PhantomData }
138    }
139
140    /// Creates a new, filled bitset with a given `domain_size`.
141    #[inline]
142    pub fn new_filled(domain_size: usize) -> DenseBitSet<T> {
143        let num_words = num_words(domain_size);
144        let mut result =
145            DenseBitSet { domain_size, words: smallvec![!0; num_words], marker: PhantomData };
146        result.clear_excess_bits();
147        result
148    }
149
150    /// Clear all elements.
151    #[inline]
152    pub fn clear(&mut self) {
153        self.words.fill(0);
154    }
155
156    /// Clear excess bits in the final word.
157    fn clear_excess_bits(&mut self) {
158        clear_excess_bits_in_final_word(self.domain_size, &mut self.words);
159    }
160
161    /// Count the number of set bits in the set.
162    pub fn count(&self) -> usize {
163        self.words.iter().map(|e| e.count_ones() as usize).sum()
164    }
165
166    /// Returns `true` if `self` contains `elem`.
167    #[inline]
168    pub fn contains(&self, elem: T) -> bool {
169        assert!(elem.index() < self.domain_size);
170        let (word_index, mask) = word_index_and_mask(elem);
171        (self.words[word_index] & mask) != 0
172    }
173
174    /// Is `self` a (non-strict) superset of `other`?
175    #[inline]
176    pub fn superset(&self, other: &DenseBitSet<T>) -> bool {
177        assert_eq!(self.domain_size, other.domain_size);
178        self.words.iter().zip(&other.words).all(|(a, b)| (a & b) == *b)
179    }
180
181    /// Is the set empty?
182    #[inline]
183    pub fn is_empty(&self) -> bool {
184        self.words.iter().all(|a| *a == 0)
185    }
186
187    /// Insert `elem`. Returns whether the set has changed.
188    #[inline]
189    pub fn insert(&mut self, elem: T) -> bool {
190        assert!(
191            elem.index() < self.domain_size,
192            "inserting element at index {} but domain size is {}",
193            elem.index(),
194            self.domain_size,
195        );
196        let (word_index, mask) = word_index_and_mask(elem);
197        let word_ref = &mut self.words[word_index];
198        let word = *word_ref;
199        let new_word = word | mask;
200        *word_ref = new_word;
201        new_word != word
202    }
203
204    #[inline]
205    pub fn insert_range(&mut self, elems: impl RangeBounds<T>) {
206        let Some((start, end)) = inclusive_start_end(elems, self.domain_size) else {
207            return;
208        };
209
210        let (start_word_index, start_mask) = word_index_and_mask(start);
211        let (end_word_index, end_mask) = word_index_and_mask(end);
212
213        // Set all words in between start and end (exclusive of both).
214        for word_index in (start_word_index + 1)..end_word_index {
215            self.words[word_index] = !0;
216        }
217
218        if start_word_index != end_word_index {
219            // Start and end are in different words, so we handle each in turn.
220            //
221            // We set all leading bits. This includes the start_mask bit.
222            self.words[start_word_index] |= !(start_mask - 1);
223            // And all trailing bits (i.e. from 0..=end) in the end word,
224            // including the end.
225            self.words[end_word_index] |= end_mask | (end_mask - 1);
226        } else {
227            self.words[start_word_index] |= end_mask | (end_mask - start_mask);
228        }
229    }
230
231    /// Sets all bits to true.
232    pub fn insert_all(&mut self) {
233        self.words.fill(!0);
234        self.clear_excess_bits();
235    }
236
237    /// Checks whether any bit in the given range is a 1.
238    #[inline]
239    pub fn contains_any(&self, elems: impl RangeBounds<T>) -> bool {
240        let Some((start, end)) = inclusive_start_end(elems, self.domain_size) else {
241            return false;
242        };
243        let (start_word_index, start_mask) = word_index_and_mask(start);
244        let (end_word_index, end_mask) = word_index_and_mask(end);
245
246        if start_word_index == end_word_index {
247            self.words[start_word_index] & (end_mask | (end_mask - start_mask)) != 0
248        } else {
249            if self.words[start_word_index] & !(start_mask - 1) != 0 {
250                return true;
251            }
252
253            let remaining = start_word_index + 1..end_word_index;
254            if remaining.start <= remaining.end {
255                self.words[remaining].iter().any(|&w| w != 0)
256                    || self.words[end_word_index] & (end_mask | (end_mask - 1)) != 0
257            } else {
258                false
259            }
260        }
261    }
262
263    /// Returns `true` if the set has changed.
264    #[inline]
265    pub fn remove(&mut self, elem: T) -> bool {
266        assert!(elem.index() < self.domain_size);
267        let (word_index, mask) = word_index_and_mask(elem);
268        let word_ref = &mut self.words[word_index];
269        let word = *word_ref;
270        let new_word = word & !mask;
271        *word_ref = new_word;
272        new_word != word
273    }
274
275    /// Iterates over the indices of set bits in a sorted order.
276    #[inline]
277    pub fn iter(&self) -> BitIter<'_, T> {
278        BitIter::new(&self.words)
279    }
280
281    pub fn last_set_in(&self, range: impl RangeBounds<T>) -> Option<T> {
282        let (start, end) = inclusive_start_end(range, self.domain_size)?;
283        let (start_word_index, _) = word_index_and_mask(start);
284        let (end_word_index, end_mask) = word_index_and_mask(end);
285
286        let end_word = self.words[end_word_index] & (end_mask | (end_mask - 1));
287        if end_word != 0 {
288            let pos = max_bit(end_word) + WORD_BITS * end_word_index;
289            if start <= pos {
290                return Some(T::new(pos));
291            }
292        }
293
294        // We exclude end_word_index from the range here, because we don't want
295        // to limit ourselves to *just* the last word: the bits set in it may be
296        // after `end`, so it may not work out.
297        if let Some(offset) =
298            self.words[start_word_index..end_word_index].iter().rposition(|&w| w != 0)
299        {
300            let word_idx = start_word_index + offset;
301            let start_word = self.words[word_idx];
302            let pos = max_bit(start_word) + WORD_BITS * word_idx;
303            if start <= pos {
304                return Some(T::new(pos));
305            }
306        }
307
308        None
309    }
310
311    bit_relations_inherent_impls! {}
312
313    /// Sets `self = self | !other`.
314    ///
315    /// FIXME: Incorporate this into [`BitRelations`] and fill out
316    /// implementations for other bitset types, if needed.
317    pub fn union_not(&mut self, other: &DenseBitSet<T>) {
318        assert_eq!(self.domain_size, other.domain_size);
319
320        // FIXME(Zalathar): If we were to forcibly _set_ all excess bits before
321        // the bitwise update, and then clear them again afterwards, we could
322        // quickly and accurately detect whether the update changed anything.
323        // But that's only worth doing if there's an actual use-case.
324
325        bitwise(&mut self.words, &other.words, |a, b| a | !b);
326        // The bitwise update `a | !b` can result in the last word containing
327        // out-of-domain bits, so we need to clear them.
328        self.clear_excess_bits();
329    }
330}
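
// Illustrative sketch (added, not part of the original source): basic
// `DenseBitSet` usage through the public API above, assuming the `Idx` impl
// for `usize` from this crate.
#[cfg(test)]
mod dense_bit_set_example {
    use super::*;

    #[test]
    fn basic_operations() {
        let mut set: DenseBitSet<usize> = DenseBitSet::new_empty(128);
        assert!(set.is_empty());

        // `insert` reports whether the set changed.
        assert!(set.insert(3));
        assert!(!set.insert(3));

        // `insert_range` accepts any `RangeBounds` over the index type.
        set.insert_range(10..14);
        assert_eq!(set.count(), 5);
        assert!(set.contains_any(12..=20));
        assert_eq!(set.last_set_in(0..100), Some(13));

        // Iteration yields the set bits in ascending order.
        assert_eq!(set.iter().collect::<Vec<_>>(), vec![3, 10, 11, 12, 13]);
    }
}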
331
332// dense REL dense
333impl<T: Idx> BitRelations<DenseBitSet<T>> for DenseBitSet<T> {
334    fn union(&mut self, other: &DenseBitSet<T>) -> bool {
335        assert_eq!(self.domain_size, other.domain_size);
336        bitwise(&mut self.words, &other.words, |a, b| a | b)
337    }
338
339    fn subtract(&mut self, other: &DenseBitSet<T>) -> bool {
340        assert_eq!(self.domain_size, other.domain_size);
341        bitwise(&mut self.words, &other.words, |a, b| a & !b)
342    }
343
344    fn intersect(&mut self, other: &DenseBitSet<T>) -> bool {
345        assert_eq!(self.domain_size, other.domain_size);
346        bitwise(&mut self.words, &other.words, |a, b| a & b)
347    }
348}
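
// Illustrative sketch (added, not part of the original source): the inherent
// `union`/`subtract`/`intersect` methods generated by
// `bit_relations_inherent_impls!` report whether `self` changed.
#[cfg(test)]
mod dense_bit_relations_example {
    use super::*;

    #[test]
    fn relations_report_changes() {
        let mut a: DenseBitSet<usize> = DenseBitSet::new_empty(64);
        let mut b: DenseBitSet<usize> = DenseBitSet::new_empty(64);
        a.insert(1);
        b.insert(1);
        b.insert(2);

        // Bit 2 is new to `a`, so the first union reports a change...
        assert!(a.union(&b));
        // ...but repeating it does not.
        assert!(!a.union(&b));

        // Subtracting `b` removes bits 1 and 2.
        assert!(a.subtract(&b));
        assert!(a.is_empty());

        // Intersecting an empty set with anything changes nothing.
        assert!(!a.intersect(&b));
    }
}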
349
350impl<T: Idx> From<GrowableBitSet<T>> for DenseBitSet<T> {
351    fn from(bit_set: GrowableBitSet<T>) -> Self {
352        bit_set.bit_set
353    }
354}
355
356impl<T> Clone for DenseBitSet<T> {
357    fn clone(&self) -> Self {
358        DenseBitSet {
359            domain_size: self.domain_size,
360            words: self.words.clone(),
361            marker: PhantomData,
362        }
363    }
364
365    fn clone_from(&mut self, from: &Self) {
366        self.domain_size = from.domain_size;
367        self.words.clone_from(&from.words);
368    }
369}
370
371impl<T: Idx> fmt::Debug for DenseBitSet<T> {
372    fn fmt(&self, w: &mut fmt::Formatter<'_>) -> fmt::Result {
373        w.debug_list().entries(self.iter()).finish()
374    }
375}
376
377impl<T: Idx> ToString for DenseBitSet<T> {
378    fn to_string(&self) -> String {
379        let mut result = String::new();
380        let mut sep = '[';
381
382        // Note: this is a little endian printout of bytes.
383
384        // i tracks how many bits we have printed so far.
385        let mut i = 0;
386        for word in &self.words {
387            let mut word = *word;
388            for _ in 0..WORD_BYTES {
389                // for each byte in `word`:
390                let remain = self.domain_size - i;
391                // If less than a byte remains, then mask just that many bits.
392                let mask = if remain <= 8 { (1 << remain) - 1 } else { 0xFF };
393                assert!(mask <= 0xFF);
394                let byte = word & mask;
395
396                result.push_str(&format!("{sep}{byte:02x}"));
397
398                if remain <= 8 {
399                    break;
400                }
401                word >>= 8;
402                i += 8;
403                sep = '-';
404            }
405            sep = '|';
406        }
407        result.push(']');
408
409        result
410    }
411}
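
// Illustrative sketch (added, not part of the original source): the `ToString`
// impl above prints the set as a little-endian byte dump, with `-` between the
// bytes of a word and `|` between words.
#[cfg(test)]
mod dense_bit_set_to_string_example {
    use super::*;

    #[test]
    fn to_string_is_a_little_endian_byte_dump() {
        let mut set: DenseBitSet<usize> = DenseBitSet::new_empty(16);
        set.insert(0);
        set.insert(8);
        // Bit 0 is in the first byte, bit 8 in the second.
        assert_eq!(set.to_string(), "[01-01]");
    }
}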
412
413pub struct BitIter<'a, T: Idx> {
414    /// A copy of the current word, but with any already-visited bits cleared.
415    /// (This lets us use `trailing_zeros()` to find the next set bit.) When it
416    /// is reduced to 0, we move onto the next word.
417    word: Word,
418
419    /// The offset (measured in bits) of the current word.
420    offset: usize,
421
422    /// Underlying iterator over the words.
423    iter: slice::Iter<'a, Word>,
424
425    marker: PhantomData<T>,
426}
427
428impl<'a, T: Idx> BitIter<'a, T> {
429    #[inline]
430    fn new(words: &'a [Word]) -> BitIter<'a, T> {
431        // We initialize `word` and `offset` to degenerate values. On the first
432        // call to `next()` we will fall through to getting the first word from
433        // `iter`, which sets `word` to the first word (if there is one) and
434        // `offset` to 0. Doing it this way saves us from having to maintain
435        // additional state about whether we have started.
436        BitIter {
437            word: 0,
438            offset: usize::MAX - (WORD_BITS - 1),
439            iter: words.iter(),
440            marker: PhantomData,
441        }
442    }
443}
444
445impl<'a, T: Idx> Iterator for BitIter<'a, T> {
446    type Item = T;
447    fn next(&mut self) -> Option<T> {
448        loop {
449            if self.word != 0 {
450                // Get the position of the next set bit in the current word,
451                // then clear the bit.
452                let bit_pos = self.word.trailing_zeros() as usize;
453                self.word ^= 1 << bit_pos;
454                return Some(T::new(bit_pos + self.offset));
455            }
456
457            // Move onto the next word. `wrapping_add()` is needed to handle
458            // the degenerate initial value given to `offset` in `new()`.
459            self.word = *self.iter.next()?;
460            self.offset = self.offset.wrapping_add(WORD_BITS);
461        }
462    }
463}
464
465/// A fixed-size bitset type with a partially dense, partially sparse
466/// representation. The bitset is broken into chunks, and chunks that are all
467/// zeros or all ones are represented and handled very efficiently.
468///
469/// This type is especially efficient for sets that typically have a large
470/// `domain_size` with significant stretches of all zeros or all ones, and also
471/// some stretches with lots of 0s and 1s mixed in a way that causes trouble
472/// for `IntervalSet`.
473///
474/// Best used via `MixedBitSet`, rather than directly, because `MixedBitSet`
475/// has better performance for small bitsets.
476///
477/// `T` is an index type, typically a newtyped `usize` wrapper, but it can also
478/// just be `usize`.
479///
480/// All operations that involve an element will panic if the element is equal
481/// to or greater than the domain size. All operations that involve two bitsets
482/// will panic if the bitsets have differing domain sizes.
483#[derive(PartialEq, Eq)]
484pub struct ChunkedBitSet<T> {
485    domain_size: usize,
486
487    /// The chunks. Each one contains exactly CHUNK_BITS values, except the
488    /// last one which contains 1..=CHUNK_BITS values.
489    chunks: Box<[Chunk]>,
490
491    marker: PhantomData<T>,
492}
493
494// NOTE: The chunk size is computed on-the-fly on each manipulation of a chunk.
495// This avoids storing it, as it's almost always CHUNK_BITS except for the last one.
496#[derive(Clone, Debug, PartialEq, Eq)]
497enum Chunk {
498    /// A chunk that is all zeros; we don't represent the zeros explicitly.
499    Zeros,
500
501    /// A chunk that is all ones; we don't represent the ones explicitly.
502    Ones,
503
504    /// A chunk that has a mix of zeros and ones, which are represented
505    /// explicitly and densely. It never has all zeros or all ones.
506    ///
507    /// If this is the final chunk there may be excess, unused words. This
508    /// turns out to be both simpler and have better performance than
509    /// allocating the minimum number of words, largely because we avoid having
510    /// to store the length, which would make this type larger. These excess
511    /// words are always zero, as are any excess bits in the final in-use word.
512    ///
513    /// The `ChunkSize` field is the count of 1s set in the chunk, and
514    /// must satisfy `0 < count < chunk_domain_size`.
515    ///
516    /// The words are within an `Rc` because it's surprisingly common to
517    /// duplicate an entire chunk, e.g. in `ChunkedBitSet::clone_from()`, or
518    /// when a `Mixed` chunk is union'd into a `Zeros` chunk. When we do need
519    /// to modify a chunk we use `Rc::make_mut`.
520    Mixed(ChunkSize, Rc<[Word; CHUNK_WORDS]>),
521}
522
523// This type is used a lot. Make sure it doesn't unintentionally get bigger.
524#[cfg(target_pointer_width = "64")]
525crate::static_assert_size!(Chunk, 16);
526
527impl<T> ChunkedBitSet<T> {
528    pub fn domain_size(&self) -> usize {
529        self.domain_size
530    }
531
532    #[inline]
533    fn last_chunk_size(&self) -> ChunkSize {
534        let n = self.domain_size % CHUNK_BITS;
535        if n == 0 { CHUNK_BITS as ChunkSize } else { n as ChunkSize }
536    }
537
538    /// All the chunks have a chunk_domain_size of `CHUNK_BITS` except the final one.
539    #[inline]
540    fn chunk_domain_size(&self, chunk: usize) -> ChunkSize {
541        if chunk == self.chunks.len() - 1 {
542            self.last_chunk_size()
543        } else {
544            CHUNK_BITS as ChunkSize
545        }
546    }
547
548    #[cfg(test)]
549    fn assert_valid(&self) {
550        if self.domain_size == 0 {
551            assert!(self.chunks.is_empty());
552            return;
553        }
554
555        assert!((self.chunks.len() - 1) * CHUNK_BITS <= self.domain_size);
556        assert!(self.chunks.len() * CHUNK_BITS >= self.domain_size);
557        for (chunk_index, chunk) in self.chunks.iter().enumerate() {
558            let chunk_domain_size = self.chunk_domain_size(chunk_index);
559            chunk.assert_valid(chunk_domain_size);
560        }
561    }
562}
563
564impl<T: Idx> ChunkedBitSet<T> {
565    /// Creates a new bitset with a given `domain_size` and chunk kind.
566    fn new(domain_size: usize, is_empty: bool) -> Self {
567        let chunks = if domain_size == 0 {
568            Box::new([])
569        } else {
570            vec![if is_empty { Zeros } else { Ones }; num_chunks(domain_size)].into_boxed_slice()
571        };
572        ChunkedBitSet { domain_size, chunks, marker: PhantomData }
573    }
574
575    /// Creates a new, empty bitset with a given `domain_size`.
576    #[inline]
577    pub fn new_empty(domain_size: usize) -> Self {
578        ChunkedBitSet::new(domain_size, /* is_empty */ true)
579    }
580
581    /// Creates a new, filled bitset with a given `domain_size`.
582    #[inline]
583    pub fn new_filled(domain_size: usize) -> Self {
584        ChunkedBitSet::new(domain_size, /* is_empty */ false)
585    }
586
587    pub fn clear(&mut self) {
588        let domain_size = self.domain_size();
589        *self = ChunkedBitSet::new_empty(domain_size);
590    }
591
592    #[cfg(test)]
593    fn chunks(&self) -> &[Chunk] {
594        &self.chunks
595    }
596
597    /// Count the number of set bits in the set.
598    pub fn count(&self) -> usize {
599        self.chunks
600            .iter()
601            .enumerate()
602            .map(|(index, chunk)| chunk.count(self.chunk_domain_size(index)))
603            .sum()
604    }
605
606    pub fn is_empty(&self) -> bool {
607        self.chunks.iter().all(|chunk| matches!(chunk, Zeros))
608    }
609
610    /// Returns `true` if `self` contains `elem`.
611    #[inline]
612    pub fn contains(&self, elem: T) -> bool {
613        assert!(elem.index() < self.domain_size);
614        let chunk = &self.chunks[chunk_index(elem)];
615        match &chunk {
616            Zeros => false,
617            Ones => true,
618            Mixed(_, words) => {
619                let (word_index, mask) = chunk_word_index_and_mask(elem);
620                (words[word_index] & mask) != 0
621            }
622        }
623    }
624
625    #[inline]
626    pub fn iter(&self) -> ChunkedBitIter<'_, T> {
627        ChunkedBitIter::new(self)
628    }
629
630    /// Insert `elem`. Returns whether the set has changed.
631    pub fn insert(&mut self, elem: T) -> bool {
632        assert!(elem.index() < self.domain_size);
633        let chunk_index = chunk_index(elem);
634        let chunk_domain_size = self.chunk_domain_size(chunk_index);
635        let chunk = &mut self.chunks[chunk_index];
636        match *chunk {
637            Zeros => {
638                if chunk_domain_size > 1 {
639                    #[cfg(feature = "nightly")]
640                    let mut words = {
641                        // We take some effort to avoid copying the words.
642                        let words = Rc::<[Word; CHUNK_WORDS]>::new_zeroed();
643                        // SAFETY: `words` can safely be all zeroes.
644                        unsafe { words.assume_init() }
645                    };
646                    #[cfg(not(feature = "nightly"))]
647                    let mut words = {
648                        // FIXME: unconditionally use `Rc::new_zeroed` once it is stable (#63291).
649                        let words = mem::MaybeUninit::<[Word; CHUNK_WORDS]>::zeroed();
650                        // SAFETY: `words` can safely be all zeroes.
651                        let words = unsafe { words.assume_init() };
652                        // Unfortunate possibly-large copy
653                        Rc::new(words)
654                    };
655                    let words_ref = Rc::get_mut(&mut words).unwrap();
656
657                    let (word_index, mask) = chunk_word_index_and_mask(elem);
658                    words_ref[word_index] |= mask;
659                    *chunk = Mixed(1, words);
660                } else {
661                    *chunk = Ones;
662                }
663                true
664            }
665            Ones => false,
666            Mixed(ref mut count, ref mut words) => {
667                // We skip all the work if the bit is already set.
668                let (word_index, mask) = chunk_word_index_and_mask(elem);
669                if (words[word_index] & mask) == 0 {
670                    *count += 1;
671                    if *count < chunk_domain_size {
672                        let words = Rc::make_mut(words);
673                        words[word_index] |= mask;
674                    } else {
675                        *chunk = Ones;
676                    }
677                    true
678                } else {
679                    false
680                }
681            }
682        }
683    }
684
685    /// Sets all bits to true.
686    pub fn insert_all(&mut self) {
687        for chunk in self.chunks.iter_mut() {
688            *chunk = Ones;
689        }
690    }
691
692    /// Returns `true` if the set has changed.
693    pub fn remove(&mut self, elem: T) -> bool {
694        assert!(elem.index() < self.domain_size);
695        let chunk_index = chunk_index(elem);
696        let chunk_domain_size = self.chunk_domain_size(chunk_index);
697        let chunk = &mut self.chunks[chunk_index];
698        match *chunk {
699            Zeros => false,
700            Ones => {
701                if chunk_domain_size > 1 {
702                    #[cfg(feature = "nightly")]
703                    let mut words = {
704                        // We take some effort to avoid copying the words.
705                        let words = Rc::<[Word; CHUNK_WORDS]>::new_zeroed();
706                        // SAFETY: `words` can safely be all zeroes.
707                        unsafe { words.assume_init() }
708                    };
709                    #[cfg(not(feature = "nightly"))]
710                    let mut words = {
711                        // FIXME: unconditionally use `Rc::new_zeroed` once it is stable (#63291).
712                        let words = mem::MaybeUninit::<[Word; CHUNK_WORDS]>::zeroed();
713                        // SAFETY: `words` can safely be all zeroes.
714                        let words = unsafe { words.assume_init() };
715                        // Unfortunate possibly-large copy
716                        Rc::new(words)
717                    };
718                    let words_ref = Rc::get_mut(&mut words).unwrap();
719
720                    // Set only the bits in use.
721                    let num_words = num_words(chunk_domain_size as usize);
722                    words_ref[..num_words].fill(!0);
723                    clear_excess_bits_in_final_word(
724                        chunk_domain_size as usize,
725                        &mut words_ref[..num_words],
726                    );
727                    let (word_index, mask) = chunk_word_index_and_mask(elem);
728                    words_ref[word_index] &= !mask;
729                    *chunk = Mixed(chunk_domain_size - 1, words);
730                } else {
731                    *chunk = Zeros;
732                }
733                true
734            }
735            Mixed(ref mut count, ref mut words) => {
736                // We skip all the work if the bit is already clear.
737                let (word_index, mask) = chunk_word_index_and_mask(elem);
738                if (words[word_index] & mask) != 0 {
739                    *count -= 1;
740                    if *count > 0 {
741                        let words = Rc::make_mut(words);
742                        words[word_index] &= !mask;
743                    } else {
744                        *chunk = Zeros
745                    }
746                    true
747                } else {
748                    false
749                }
750            }
751        }
752    }
753
754    fn chunk_iter(&self, chunk_index: usize) -> ChunkIter<'_> {
755        let chunk_domain_size = self.chunk_domain_size(chunk_index);
756        match self.chunks.get(chunk_index) {
757            Some(Zeros) => ChunkIter::Zeros,
758            Some(Ones) => ChunkIter::Ones(0..chunk_domain_size as usize),
759            Some(Mixed(_, words)) => {
760                let num_words = num_words(chunk_domain_size as usize);
761                ChunkIter::Mixed(BitIter::new(&words[0..num_words]))
762            }
763            None => ChunkIter::Finished,
764        }
765    }
766
767    bit_relations_inherent_impls! {}
768}
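
// Illustrative sketch (added, not part of the original source): inserts and
// removals only materialize the affected 2048-bit chunk; all-zero and all-one
// chunks stay in their compressed `Zeros`/`Ones` form.
#[cfg(test)]
mod chunked_bit_set_example {
    use super::*;

    #[test]
    fn insert_and_remove_across_chunks() {
        let mut set: ChunkedBitSet<usize> = ChunkedBitSet::new_empty(10_000);
        assert!(set.is_empty());

        // 0 and 3000 land in different chunks (0 / 2048 = 0, 3000 / 2048 = 1).
        assert!(set.insert(0));
        assert!(set.insert(3000));
        assert_eq!(set.count(), 2);
        assert!(set.contains(3000));

        assert!(set.remove(0));
        assert!(!set.remove(0));
        assert_eq!(set.iter().collect::<Vec<_>>(), vec![3000]);
    }
}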
769
770impl<T: Idx> BitRelations<ChunkedBitSet<T>> for ChunkedBitSet<T> {
771    fn union(&mut self, other: &ChunkedBitSet<T>) -> bool {
772        assert_eq!(self.domain_size, other.domain_size);
773
774        let num_chunks = self.chunks.len();
775        debug_assert_eq!(num_chunks, other.chunks.len());
776
777        let last_chunk_size = self.last_chunk_size();
778        debug_assert_eq!(last_chunk_size, other.last_chunk_size());
779
780        let mut changed = false;
781        for (chunk_index, (mut self_chunk, other_chunk)) in
782            self.chunks.iter_mut().zip(other.chunks.iter()).enumerate()
783        {
784            let chunk_domain_size = if chunk_index + 1 == num_chunks {
785                last_chunk_size
786            } else {
787                CHUNK_BITS as ChunkSize
788            };
789
790            match (&mut self_chunk, &other_chunk) {
791                (_, Zeros) | (Ones, _) => {}
792                (Zeros, Ones) | (Mixed(..), Ones) | (Zeros, Mixed(..)) => {
793                    // `other_chunk` fully overwrites `self_chunk`
794                    *self_chunk = other_chunk.clone();
795                    changed = true;
796                }
797                (
798                    Mixed(self_chunk_count, self_chunk_words),
799                    Mixed(_other_chunk_count, other_chunk_words),
800                ) => {
801                    // First check if the operation would change
802                    // `self_chunk.words`. If not, we can avoid allocating some
803                    // words, and this happens often enough that it's a
804                    // performance win. Also, we only need to operate on the
805                    // in-use words, hence the slicing.
806                    let op = |a, b| a | b;
807                    let num_words = num_words(chunk_domain_size as usize);
808                    if bitwise_changes(
809                        &self_chunk_words[0..num_words],
810                        &other_chunk_words[0..num_words],
811                        op,
812                    ) {
813                        let self_chunk_words = Rc::make_mut(self_chunk_words);
814                        let has_changed = bitwise(
815                            &mut self_chunk_words[0..num_words],
816                            &other_chunk_words[0..num_words],
817                            op,
818                        );
819                        debug_assert!(has_changed);
820                        *self_chunk_count = self_chunk_words[0..num_words]
821                            .iter()
822                            .map(|w| w.count_ones() as ChunkSize)
823                            .sum();
824                        if *self_chunk_count == chunk_domain_size {
825                            *self_chunk = Ones;
826                        }
827                        changed = true;
828                    }
829                }
830            }
831        }
832        changed
833    }
834
835    fn subtract(&mut self, other: &ChunkedBitSet<T>) -> bool {
836        assert_eq!(self.domain_size, other.domain_size);
837
838        let num_chunks = self.chunks.len();
839        debug_assert_eq!(num_chunks, other.chunks.len());
840
841        let last_chunk_size = self.last_chunk_size();
842        debug_assert_eq!(last_chunk_size, other.last_chunk_size());
843
844        let mut changed = false;
845        for (chunk_index, (mut self_chunk, other_chunk)) in
846            self.chunks.iter_mut().zip(other.chunks.iter()).enumerate()
847        {
848            let chunk_domain_size = if chunk_index + 1 == num_chunks {
849                last_chunk_size
850            } else {
851                CHUNK_BITS as ChunkSize
852            };
853
854            match (&mut self_chunk, &other_chunk) {
855                (Zeros, _) | (_, Zeros) => {}
856                (Ones | Mixed(_, _), Ones) => {
857                    changed = true;
858                    *self_chunk = Zeros;
859                }
860                (Ones, Mixed(other_chunk_count, other_chunk_words)) => {
861                    changed = true;
862                    let num_words = num_words(chunk_domain_size as usize);
863                    debug_assert!(num_words > 0 && num_words <= CHUNK_WORDS);
864                    let mut tail_mask =
865                        u64::MAX >> (WORD_BITS - ((chunk_domain_size as usize) - (num_words - 1) * WORD_BITS));
866                    let mut self_chunk_words = **other_chunk_words;
867                    for word in self_chunk_words[0..num_words].iter_mut().rev() {
868                        *word = !*word & tail_mask;
869                        tail_mask = u64::MAX;
870                    }
871                    let self_chunk_count = chunk_domain_size - *other_chunk_count;
872                    debug_assert_eq!(
873                        self_chunk_count,
874                        self_chunk_words[0..num_words]
875                            .iter()
876                            .map(|w| w.count_ones() as ChunkSize)
877                            .sum()
878                    );
879                    *self_chunk = Mixed(self_chunk_count, Rc::new(self_chunk_words));
880                }
881                (
882                    Mixed(self_chunk_count, self_chunk_words),
883                    Mixed(_other_chunk_count, other_chunk_words),
884                ) => {
885                    // See [`<Self as BitRelations<ChunkedBitSet<T>>>::union`] for the explanation
886                    let op = |a: u64, b: u64| a & !b;
887                    let num_words = num_words(chunk_domain_size as usize);
888                    if bitwise_changes(
889                        &self_chunk_words[0..num_words],
890                        &other_chunk_words[0..num_words],
891                        op,
892                    ) {
893                        let self_chunk_words = Rc::make_mut(self_chunk_words);
894                        let has_changed = bitwise(
895                            &mut self_chunk_words[0..num_words],
896                            &other_chunk_words[0..num_words],
897                            op,
898                        );
899                        debug_assert!(has_changed);
900                        *self_chunk_count = self_chunk_words[0..num_words]
901                            .iter()
902                            .map(|w| w.count_ones() as ChunkSize)
903                            .sum();
904                        if *self_chunk_count == 0 {
905                            *self_chunk = Zeros;
906                        }
907                        changed = true;
908                    }
909                }
910            }
911        }
912        changed
913    }
914
915    fn intersect(&mut self, other: &ChunkedBitSet<T>) -> bool {
916        assert_eq!(self.domain_size, other.domain_size);
917
918        let num_chunks = self.chunks.len();
919        debug_assert_eq!(num_chunks, other.chunks.len());
920
921        let last_chunk_size = self.last_chunk_size();
922        debug_assert_eq!(last_chunk_size, other.last_chunk_size());
923
924        let mut changed = false;
925        for (chunk_index, (mut self_chunk, other_chunk)) in
926            self.chunks.iter_mut().zip(other.chunks.iter()).enumerate()
927        {
928            let chunk_domain_size = if chunk_index + 1 == num_chunks {
929                last_chunk_size
930            } else {
931                CHUNK_BITS as ChunkSize
932            };
933
934            match (&mut self_chunk, &other_chunk) {
935                (Zeros, _) | (_, Ones) => {}
936                (Ones, Zeros | Mixed(..)) | (Mixed(..), Zeros) => {
937                    changed = true;
938                    *self_chunk = other_chunk.clone();
939                }
940                (
941                    Mixed(self_chunk_count, self_chunk_words),
942                    Mixed(_other_chunk_count, other_chunk_words),
943                ) => {
944                    // See [`<Self as BitRelations<ChunkedBitSet<T>>>::union`] for the explanation
945                    let op = |a, b| a & b;
946                    let num_words = num_words(chunk_domain_size as usize);
947                    if bitwise_changes(
948                        &self_chunk_words[0..num_words],
949                        &other_chunk_words[0..num_words],
950                        op,
951                    ) {
952                        let self_chunk_words = Rc::make_mut(self_chunk_words);
953                        let has_changed = bitwise(
954                            &mut self_chunk_words[0..num_words],
955                            &other_chunk_words[0..num_words],
956                            op,
957                        );
958                        debug_assert!(has_changed);
959                        *self_chunk_count = self_chunk_words[0..num_words]
960                            .iter()
961                            .map(|w| w.count_ones() as ChunkSize)
962                            .sum();
963                        if *self_chunk_count == 0 {
964                            *self_chunk = Zeros;
965                        }
966                        changed = true;
967                    }
968                }
969            }
970        }
971
972        changed
973    }
974}
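
// Illustrative sketch (added, not part of the original source): the chunk-level
// fast paths above keep relations on huge, mostly-uniform sets cheap, since
// whole `Zeros`/`Ones` chunks are handled without touching any words.
#[cfg(test)]
mod chunked_bit_relations_example {
    use super::*;

    #[test]
    fn union_and_subtract_collapse_chunks() {
        let mut a: ChunkedBitSet<usize> = ChunkedBitSet::new_empty(1 << 20);
        let b: ChunkedBitSet<usize> = ChunkedBitSet::new_filled(1 << 20);

        // Unioning a filled set turns every chunk of `a` into `Ones`.
        assert!(a.union(&b));
        assert_eq!(a.count(), 1 << 20);

        // Subtracting it again turns every chunk back into `Zeros`.
        assert!(a.subtract(&b));
        assert!(a.is_empty());
        assert!(!a.subtract(&b));
    }
}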
975
976impl<T: Idx> BitRelations<ChunkedBitSet<T>> for DenseBitSet<T> {
977    fn union(&mut self, other: &ChunkedBitSet<T>) -> bool {
978        sequential_update(|elem| self.insert(elem), other.iter())
979    }
980
981    fn subtract(&mut self, _other: &ChunkedBitSet<T>) -> bool {
982        unimplemented!("implement if/when necessary");
983    }
984
985    fn intersect(&mut self, other: &ChunkedBitSet<T>) -> bool {
986        assert_eq!(self.domain_size(), other.domain_size);
987        let mut changed = false;
988        for (i, chunk) in other.chunks.iter().enumerate() {
989            let mut words = &mut self.words[i * CHUNK_WORDS..];
990            if words.len() > CHUNK_WORDS {
991                words = &mut words[..CHUNK_WORDS];
992            }
993            match chunk {
994                Zeros => {
995                    for word in words {
996                        if *word != 0 {
997                            changed = true;
998                            *word = 0;
999                        }
1000                    }
1001                }
1002                Ones => (),
1003                Mixed(_, data) => {
1004                    for (i, word) in words.iter_mut().enumerate() {
1005                        let new_val = *word & data[i];
1006                        if new_val != *word {
1007                            changed = true;
1008                            *word = new_val;
1009                        }
1010                    }
1011                }
1012            }
1013        }
1014        changed
1015    }
1016}
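
// Illustrative sketch (added, not part of the original source): a dense set can
// be intersected with a chunked set of the same domain size; `Zeros` chunks
// clear whole word ranges without consulting any chunk words.
#[cfg(test)]
mod dense_vs_chunked_example {
    use super::*;

    #[test]
    fn intersect_dense_with_chunked() {
        let mut dense: DenseBitSet<usize> = DenseBitSet::new_empty(4096);
        dense.insert(10);
        dense.insert(3000);

        // The chunked set only contains 10, so the intersection drops 3000.
        let mut chunked: ChunkedBitSet<usize> = ChunkedBitSet::new_empty(4096);
        chunked.insert(10);

        assert!(dense.intersect(&chunked));
        assert_eq!(dense.iter().collect::<Vec<_>>(), vec![10]);
    }
}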
1017
1018impl<T> Clone for ChunkedBitSet<T> {
1019    fn clone(&self) -> Self {
1020        ChunkedBitSet {
1021            domain_size: self.domain_size,
1022            chunks: self.chunks.clone(),
1023            marker: PhantomData,
1024        }
1025    }
1026
1027    /// WARNING: this implementation of clone_from will panic if the two
1028    /// bitsets have different domain sizes. This constraint is not inherent to
1029    /// `clone_from`, but it works with the existing call sites and allows a
1030    /// faster implementation, which is important because this function is hot.
1031    fn clone_from(&mut self, from: &Self) {
1032        assert_eq!(self.domain_size, from.domain_size);
1033        debug_assert_eq!(self.chunks.len(), from.chunks.len());
1034
1035        self.chunks.clone_from(&from.chunks)
1036    }
1037}
1038
1039pub struct ChunkedBitIter<'a, T: Idx> {
1040    bit_set: &'a ChunkedBitSet<T>,
1041
1042    // The index of the current chunk.
1043    chunk_index: usize,
1044
1045    // The sub-iterator for the current chunk.
1046    chunk_iter: ChunkIter<'a>,
1047}
1048
1049impl<'a, T: Idx> ChunkedBitIter<'a, T> {
1050    #[inline]
1051    fn new(bit_set: &'a ChunkedBitSet<T>) -> ChunkedBitIter<'a, T> {
1052        ChunkedBitIter { bit_set, chunk_index: 0, chunk_iter: bit_set.chunk_iter(0) }
1053    }
1054}
1055
1056impl<'a, T: Idx> Iterator for ChunkedBitIter<'a, T> {
1057    type Item = T;
1058
1059    fn next(&mut self) -> Option<T> {
1060        loop {
1061            match &mut self.chunk_iter {
1062                ChunkIter::Zeros => {}
1063                ChunkIter::Ones(iter) => {
1064                    if let Some(next) = iter.next() {
1065                        return Some(T::new(next + self.chunk_index * CHUNK_BITS));
1066                    }
1067                }
1068                ChunkIter::Mixed(iter) => {
1069                    if let Some(next) = iter.next() {
1070                        return Some(T::new(next + self.chunk_index * CHUNK_BITS));
1071                    }
1072                }
1073                ChunkIter::Finished => return None,
1074            }
1075            self.chunk_index += 1;
1076            self.chunk_iter = self.bit_set.chunk_iter(self.chunk_index);
1077        }
1078    }
1079}
1080
1081impl Chunk {
1082    #[cfg(test)]
1083    fn assert_valid(&self, chunk_domain_size: ChunkSize) {
1084        assert!(chunk_domain_size as usize <= CHUNK_BITS);
1085        match *self {
1086            Zeros | Ones => {}
1087            Mixed(count, ref words) => {
1088                assert!(0 < count && count < chunk_domain_size);
1089
1090                // Check the number of set bits matches `count`.
1091                assert_eq!(
1092                    words.iter().map(|w| w.count_ones() as ChunkSize).sum::<ChunkSize>(),
1093                    count
1094                );
1095
1096                // Check the not-in-use words are all zeroed.
1097                let num_words = num_words(chunk_domain_size as usize);
1098                if num_words < CHUNK_WORDS {
1099                    assert_eq!(
1100                        words[num_words..]
1101                            .iter()
1102                            .map(|w| w.count_ones() as ChunkSize)
1103                            .sum::<ChunkSize>(),
1104                        0
1105                    );
1106                }
1107            }
1108        }
1109    }
1110
1111    /// Count the number of 1s in the chunk.
1112    fn count(&self, chunk_domain_size: ChunkSize) -> usize {
1113        match *self {
1114            Zeros => 0,
1115            Ones => chunk_domain_size as usize,
1116            Mixed(count, _) => count as usize,
1117        }
1118    }
1119}
1120
1121enum ChunkIter<'a> {
1122    Zeros,
1123    Ones(Range<usize>),
1124    Mixed(BitIter<'a, usize>),
1125    Finished,
1126}
1127
1128// Applies a function to mutate a bitset, and returns `true` if any
1129// of the applications returned `true`.
1130fn sequential_update<T: Idx>(
1131    mut self_update: impl FnMut(T) -> bool,
1132    it: impl Iterator<Item = T>,
1133) -> bool {
1134    it.fold(false, |changed, elem| self_update(elem) | changed)
1135}
1136
1137impl<T: Idx> fmt::Debug for ChunkedBitSet<T> {
1138    fn fmt(&self, w: &mut fmt::Formatter<'_>) -> fmt::Result {
1139        w.debug_list().entries(self.iter()).finish()
1140    }
1141}
1142
1143/// Sets `out_vec[i] = op(out_vec[i], in_vec[i])` for each index `i` in both
1144/// slices. The slices must have the same length.
1145///
1146/// Returns true if at least one bit in `out_vec` was changed.
1147///
1148/// ## Warning
1149/// Some bitwise operations (e.g. union-not, xor) can set output bits that were
1150/// unset in both inputs. If this happens in the last word/chunk of a bitset,
1151/// it can cause the bitset to contain out-of-domain values, which need to
1152/// be cleared with `clear_excess_bits_in_final_word`. This also makes the
1153/// "changed" return value unreliable, because the change might have only
1154/// affected excess bits.
1155#[inline]
1156fn bitwise<Op>(out_vec: &mut [Word], in_vec: &[Word], op: Op) -> bool
1157where
1158    Op: Fn(Word, Word) -> Word,
1159{
1160    assert_eq!(out_vec.len(), in_vec.len());
1161    let mut changed = 0;
1162    for (out_elem, in_elem) in iter::zip(out_vec, in_vec) {
1163        let old_val = *out_elem;
1164        let new_val = op(old_val, *in_elem);
1165        *out_elem = new_val;
1166        // This is essentially equivalent to a != with changed being a bool, but
1167        // in practice this code gets auto-vectorized by the compiler for most
1168        // operators. Using != here causes us to generate quite poor code as the
1169        // compiler tries to go back to a boolean on each loop iteration.
1170        changed |= old_val ^ new_val;
1171    }
1172    changed != 0
1173}
1174
1175/// Does this bitwise operation change `out_vec`?
1176#[inline]
1177fn bitwise_changes<Op>(out_vec: &[Word], in_vec: &[Word], op: Op) -> bool
1178where
1179    Op: Fn(Word, Word) -> Word,
1180{
1181    assert_eq!(out_vec.len(), in_vec.len());
1182    for (out_elem, in_elem) in iter::zip(out_vec, in_vec) {
1183        let old_val = *out_elem;
1184        let new_val = op(old_val, *in_elem);
1185        if old_val != new_val {
1186            return true;
1187        }
1188    }
1189    false
1190}
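
// Illustrative sketch (added, not part of the original source): `bitwise`
// reports whether the destination words changed, and `bitwise_changes` answers
// the same question without writing, which is how the chunked relations above
// avoid cloning chunks for no-op updates.
#[cfg(test)]
mod bitwise_helper_example {
    use super::*;

    #[test]
    fn bitwise_reports_changes() {
        let mut out = [0b0011u64, 0b0000];
        let other = [0b0101u64, 0b0000];

        assert!(bitwise_changes(&out, &other, |a, b| a | b));
        assert!(bitwise(&mut out, &other, |a, b| a | b));
        assert_eq!(out, [0b0111, 0b0000]);

        // A second, identical union is a no-op.
        assert!(!bitwise(&mut out, &other, |a, b| a | b));
    }
}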
1191
1192/// A bitset with a mixed representation, using `DenseBitSet` for small and
1193/// medium bitsets, and `ChunkedBitSet` for large bitsets, i.e. those with
1194/// enough bits for at least two chunks. This is a good choice for many bitsets
1195/// that can have large domain sizes (e.g. 5000+).
1196///
1197/// `T` is an index type, typically a newtyped `usize` wrapper, but it can also
1198/// just be `usize`.
1199///
1200/// All operations that involve an element will panic if the element is equal
1201/// to or greater than the domain size. All operations that involve two bitsets
1202/// will panic if the bitsets have differing domain sizes.
1203#[derive(PartialEq, Eq)]
1204pub enum MixedBitSet<T> {
1205    Small(DenseBitSet<T>),
1206    Large(ChunkedBitSet<T>),
1207}
1208
1209impl<T> MixedBitSet<T> {
1210    pub fn domain_size(&self) -> usize {
1211        match self {
1212            MixedBitSet::Small(set) => set.domain_size(),
1213            MixedBitSet::Large(set) => set.domain_size(),
1214        }
1215    }
1216}
1217
1218impl<T: Idx> MixedBitSet<T> {
1219    #[inline]
1220    pub fn new_empty(domain_size: usize) -> MixedBitSet<T> {
1221        if domain_size <= CHUNK_BITS {
1222            MixedBitSet::Small(DenseBitSet::new_empty(domain_size))
1223        } else {
1224            MixedBitSet::Large(ChunkedBitSet::new_empty(domain_size))
1225        }
1226    }
1227
1228    #[inline]
1229    pub fn is_empty(&self) -> bool {
1230        match self {
1231            MixedBitSet::Small(set) => set.is_empty(),
1232            MixedBitSet::Large(set) => set.is_empty(),
1233        }
1234    }
1235
1236    #[inline]
1237    pub fn contains(&self, elem: T) -> bool {
1238        match self {
1239            MixedBitSet::Small(set) => set.contains(elem),
1240            MixedBitSet::Large(set) => set.contains(elem),
1241        }
1242    }
1243
1244    #[inline]
1245    pub fn insert(&mut self, elem: T) -> bool {
1246        match self {
1247            MixedBitSet::Small(set) => set.insert(elem),
1248            MixedBitSet::Large(set) => set.insert(elem),
1249        }
1250    }
1251
1252    pub fn insert_all(&mut self) {
1253        match self {
1254            MixedBitSet::Small(set) => set.insert_all(),
1255            MixedBitSet::Large(set) => set.insert_all(),
1256        }
1257    }
1258
1259    #[inline]
1260    pub fn remove(&mut self, elem: T) -> bool {
1261        match self {
1262            MixedBitSet::Small(set) => set.remove(elem),
1263            MixedBitSet::Large(set) => set.remove(elem),
1264        }
1265    }
1266
1267    pub fn iter(&self) -> MixedBitIter<'_, T> {
1268        match self {
1269            MixedBitSet::Small(set) => MixedBitIter::Small(set.iter()),
1270            MixedBitSet::Large(set) => MixedBitIter::Large(set.iter()),
1271        }
1272    }
1273
1274    #[inline]
1275    pub fn clear(&mut self) {
1276        match self {
1277            MixedBitSet::Small(set) => set.clear(),
1278            MixedBitSet::Large(set) => set.clear(),
1279        }
1280    }
1281
1282    bit_relations_inherent_impls! {}
1283}
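
// Illustrative sketch (added, not part of the original source): `MixedBitSet`
// chooses its representation from the domain size alone, staying dense up to
// one chunk's worth of bits (`CHUNK_BITS`) and switching to the chunked form
// beyond that.
#[cfg(test)]
mod mixed_bit_set_example {
    use super::*;

    #[test]
    fn representation_depends_on_domain_size() {
        let small = MixedBitSet::<usize>::new_empty(CHUNK_BITS);
        let large = MixedBitSet::<usize>::new_empty(CHUNK_BITS + 1);
        assert!(matches!(small, MixedBitSet::Small(_)));
        assert!(matches!(large, MixedBitSet::Large(_)));

        let mut set = MixedBitSet::<usize>::new_empty(CHUNK_BITS + 1);
        assert!(set.insert(2048));
        assert!(set.contains(2048));
        assert_eq!(set.iter().collect::<Vec<_>>(), vec![2048]);
    }
}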
1284
1285impl<T> Clone for MixedBitSet<T> {
1286    fn clone(&self) -> Self {
1287        match self {
1288            MixedBitSet::Small(set) => MixedBitSet::Small(set.clone()),
1289            MixedBitSet::Large(set) => MixedBitSet::Large(set.clone()),
1290        }
1291    }
1292
1293    /// WARNING: this implementation of clone_from may panic if the two
1294    /// bitsets have different domain sizes. This constraint is not inherent to
1295    /// `clone_from`, but it works with the existing call sites and allows a
1296    /// faster implementation, which is important because this function is hot.
1297    fn clone_from(&mut self, from: &Self) {
1298        match (self, from) {
1299            (MixedBitSet::Small(set), MixedBitSet::Small(from)) => set.clone_from(from),
1300            (MixedBitSet::Large(set), MixedBitSet::Large(from)) => set.clone_from(from),
1301            _ => panic!("MixedBitSet size mismatch"),
1302        }
1303    }
1304}
1305
1306impl<T: Idx> BitRelations<MixedBitSet<T>> for MixedBitSet<T> {
1307    fn union(&mut self, other: &MixedBitSet<T>) -> bool {
1308        match (self, other) {
1309            (MixedBitSet::Small(set), MixedBitSet::Small(other)) => set.union(other),
1310            (MixedBitSet::Large(set), MixedBitSet::Large(other)) => set.union(other),
1311            _ => panic!("MixedBitSet size mismatch"),
1312        }
1313    }
1314
1315    fn subtract(&mut self, other: &MixedBitSet<T>) -> bool {
1316        match (self, other) {
1317            (MixedBitSet::Small(set), MixedBitSet::Small(other)) => set.subtract(other),
1318            (MixedBitSet::Large(set), MixedBitSet::Large(other)) => set.subtract(other),
1319            _ => panic!("MixedBitSet size mismatch"),
1320        }
1321    }
1322
1323    fn intersect(&mut self, _other: &MixedBitSet<T>) -> bool {
1324        unimplemented!("implement if/when necessary");
1325    }
1326}
1327
1328impl<T: Idx> fmt::Debug for MixedBitSet<T> {
1329    fn fmt(&self, w: &mut fmt::Formatter<'_>) -> fmt::Result {
1330        match self {
1331            MixedBitSet::Small(set) => set.fmt(w),
1332            MixedBitSet::Large(set) => set.fmt(w),
1333        }
1334    }
1335}
1336
1337pub enum MixedBitIter<'a, T: Idx> {
1338    Small(BitIter<'a, T>),
1339    Large(ChunkedBitIter<'a, T>),
1340}
1341
1342impl<'a, T: Idx> Iterator for MixedBitIter<'a, T> {
1343    type Item = T;
1344    fn next(&mut self) -> Option<T> {
1345        match self {
1346            MixedBitIter::Small(iter) => iter.next(),
1347            MixedBitIter::Large(iter) => iter.next(),
1348        }
1349    }
1350}
1351
1352/// A resizable bitset type with a dense representation.
1353///
1354/// `T` is an index type, typically a newtyped `usize` wrapper, but it can also
1355/// just be `usize`.
1356///
1357/// All operations that involve an element will panic if the element is equal
1358/// to or greater than the domain size.
1359#[derive(Clone, Debug, PartialEq)]
1360pub struct GrowableBitSet<T: Idx> {
1361    bit_set: DenseBitSet<T>,
1362}
1363
1364impl<T: Idx> Default for GrowableBitSet<T> {
1365    fn default() -> Self {
1366        GrowableBitSet::new_empty()
1367    }
1368}
1369
1370impl<T: Idx> GrowableBitSet<T> {
1371    /// Ensure that the set can hold at least `min_domain_size` elements.
1372    pub fn ensure(&mut self, min_domain_size: usize) {
1373        if self.bit_set.domain_size < min_domain_size {
1374            self.bit_set.domain_size = min_domain_size;
1375        }
1376
1377        let min_num_words = num_words(min_domain_size);
1378        if self.bit_set.words.len() < min_num_words {
1379            self.bit_set.words.resize(min_num_words, 0)
1380        }
1381    }
1382
1383    pub fn new_empty() -> GrowableBitSet<T> {
1384        GrowableBitSet { bit_set: DenseBitSet::new_empty(0) }
1385    }
1386
1387    pub fn with_capacity(capacity: usize) -> GrowableBitSet<T> {
1388        GrowableBitSet { bit_set: DenseBitSet::new_empty(capacity) }
1389    }
1390
1391    /// Returns `true` if the set has changed.
1392    #[inline]
1393    pub fn insert(&mut self, elem: T) -> bool {
1394        self.ensure(elem.index() + 1);
1395        self.bit_set.insert(elem)
1396    }
1397
1398    /// Returns `true` if the set has changed.
1399    #[inline]
1400    pub fn remove(&mut self, elem: T) -> bool {
1401        self.ensure(elem.index() + 1);
1402        self.bit_set.remove(elem)
1403    }
1404
1405    #[inline]
1406    pub fn is_empty(&self) -> bool {
1407        self.bit_set.is_empty()
1408    }
1409
1410    #[inline]
1411    pub fn contains(&self, elem: T) -> bool {
1412        let (word_index, mask) = word_index_and_mask(elem);
1413        self.bit_set.words.get(word_index).is_some_and(|word| (word & mask) != 0)
1414    }
1415
1416    #[inline]
1417    pub fn iter(&self) -> BitIter<'_, T> {
1418        self.bit_set.iter()
1419    }
1420
1421    #[inline]
1422    pub fn len(&self) -> usize {
1423        self.bit_set.count()
1424    }
1425}
1426
1427impl<T: Idx> From<DenseBitSet<T>> for GrowableBitSet<T> {
1428    fn from(bit_set: DenseBitSet<T>) -> Self {
1429        Self { bit_set }
1430    }
1431}
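
// Illustrative sketch (added, not part of the original source): unlike the
// fixed-size sets above, `GrowableBitSet` grows its domain on demand when
// inserting or removing.
#[cfg(test)]
mod growable_bit_set_example {
    use super::*;

    #[test]
    fn grows_on_insert() {
        let mut set: GrowableBitSet<usize> = GrowableBitSet::new_empty();
        assert!(!set.contains(100));

        // Inserting 100 grows the domain to at least 101 elements.
        assert!(set.insert(100));
        assert!(set.contains(100));
        assert_eq!(set.len(), 1);

        // Converting into a fixed-size set keeps the grown domain.
        let dense: DenseBitSet<usize> = set.into();
        assert_eq!(dense.domain_size(), 101);
    }
}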
1432
1433/// A fixed-size 2D bit matrix type with a dense representation.
1434///
1435/// `R` and `C` are index types used to identify rows and columns respectively;
1436/// typically newtyped `usize` wrappers, but they can also just be `usize`.
1437///
1438/// All operations that involve a row and/or column index will panic if the
1439/// index exceeds the relevant bound.
1440#[cfg_attr(feature = "nightly", derive(Decodable_NoContext, Encodable_NoContext))]
1441#[derive(Clone, Eq, PartialEq, Hash)]
1442pub struct BitMatrix<R: Idx, C: Idx> {
1443    num_rows: usize,
1444    num_columns: usize,
1445    words: SmallVec<[Word; 2]>,
1446    marker: PhantomData<(R, C)>,
1447}
1448
1449impl<R: Idx, C: Idx> BitMatrix<R, C> {
1450    /// Creates a new `rows x columns` matrix, initially empty.
1451    pub fn new(num_rows: usize, num_columns: usize) -> BitMatrix<R, C> {
1452        // For every row, we need one bit per column, rounded up to a
1453        // whole number of words.
        let words_per_row = num_words(num_columns);
        BitMatrix {
            num_rows,
            num_columns,
            words: smallvec![0; num_rows * words_per_row],
            marker: PhantomData,
        }
    }

    /// Creates a new matrix, with `row` used as the value for every row.
    pub fn from_row_n(row: &DenseBitSet<C>, num_rows: usize) -> BitMatrix<R, C> {
        let num_columns = row.domain_size();
        let words_per_row = num_words(num_columns);
        assert_eq!(words_per_row, row.words.len());
        BitMatrix {
            num_rows,
            num_columns,
            words: iter::repeat(&row.words).take(num_rows).flatten().cloned().collect(),
            marker: PhantomData,
        }
    }

    pub fn rows(&self) -> impl Iterator<Item = R> {
        (0..self.num_rows).map(R::new)
    }

    /// The range of words for a given row.
    fn range(&self, row: R) -> (usize, usize) {
        let words_per_row = num_words(self.num_columns);
        let start = row.index() * words_per_row;
        (start, start + words_per_row)
    }

    /// Sets the cell at `(row, column)` to true. Put another way, insert
    /// `column` into the bitset for `row`.
    ///
    /// Returns `true` if this changed the matrix.
    pub fn insert(&mut self, row: R, column: C) -> bool {
        assert!(row.index() < self.num_rows && column.index() < self.num_columns);
        let (start, _) = self.range(row);
        let (word_index, mask) = word_index_and_mask(column);
        let words = &mut self.words[..];
        let word = words[start + word_index];
        let new_word = word | mask;
        words[start + word_index] = new_word;
        word != new_word
    }

    /// Do the bits from `row` contain `column`? Put another way, is
    /// the matrix cell at `(row, column)` true?  Put yet another way,
    /// if the matrix represents (transitive) reachability, can
    /// `row` reach `column`?
    pub fn contains(&self, row: R, column: C) -> bool {
        assert!(row.index() < self.num_rows && column.index() < self.num_columns);
        let (start, _) = self.range(row);
        let (word_index, mask) = word_index_and_mask(column);
        (self.words[start + word_index] & mask) != 0
    }

    /// Returns those indices that are true in both `row1` and `row2`. This
    /// is an *O*(*n*) operation, where *n* is the number of columns, and is
    /// largely independent of the actual size of the intersection.
    pub fn intersect_rows(&self, row1: R, row2: R) -> Vec<C> {
        assert!(row1.index() < self.num_rows && row2.index() < self.num_rows);
        let (row1_start, row1_end) = self.range(row1);
        let (row2_start, row2_end) = self.range(row2);
        let mut result = Vec::with_capacity(self.num_columns);
        for (base, (i, j)) in (row1_start..row1_end).zip(row2_start..row2_end).enumerate() {
            let mut v = self.words[i] & self.words[j];
            for bit in 0..WORD_BITS {
                if v == 0 {
                    break;
                }
                if v & 0x1 != 0 {
                    result.push(C::new(base * WORD_BITS + bit));
                }
                v >>= 1;
            }
        }
        result
    }

    /// Adds the bits from row `read` to the bits from row `write`, and
    /// returns `true` if anything changed.
    ///
    /// This is used when computing transitive reachability: if there is an
    /// edge `write -> read`, then `write` can reach everything that `read`
    /// can (and potentially more).
    pub fn union_rows(&mut self, read: R, write: R) -> bool {
        assert!(read.index() < self.num_rows && write.index() < self.num_rows);
        let (read_start, read_end) = self.range(read);
        let (write_start, write_end) = self.range(write);
        let words = &mut self.words[..];
        let mut changed = 0;
        for (read_index, write_index) in iter::zip(read_start..read_end, write_start..write_end) {
            let word = words[write_index];
            let new_word = word | words[read_index];
            words[write_index] = new_word;
            // See `bitwise` for the rationale.
            changed |= word ^ new_word;
        }
        changed != 0
    }

    /// Adds the bits from `with` to the bits from row `write`, and
    /// returns `true` if anything changed.
    pub fn union_row_with(&mut self, with: &DenseBitSet<C>, write: R) -> bool {
        assert!(write.index() < self.num_rows);
        assert_eq!(with.domain_size(), self.num_columns);
        let (write_start, write_end) = self.range(write);
        bitwise(&mut self.words[write_start..write_end], &with.words, |a, b| a | b)
    }

    /// Sets every cell in `row` to true.
    pub fn insert_all_into_row(&mut self, row: R) {
        assert!(row.index() < self.num_rows);
        let (start, end) = self.range(row);
        let words = &mut self.words[..];
        for index in start..end {
            words[index] = !0;
        }
        clear_excess_bits_in_final_word(self.num_columns, &mut self.words[..end]);
    }

    /// Gets a slice of the underlying words.
    pub fn words(&self) -> &[Word] {
        &self.words
    }

    /// Iterates through all the columns set to true in a given row of
    /// the matrix.
    pub fn iter(&self, row: R) -> BitIter<'_, C> {
        assert!(row.index() < self.num_rows);
        let (start, end) = self.range(row);
        BitIter::new(&self.words[start..end])
    }

    /// Returns the number of elements in `row`.
    pub fn count(&self, row: R) -> usize {
        let (start, end) = self.range(row);
        self.words[start..end].iter().map(|e| e.count_ones() as usize).sum()
    }
}

impl<R: Idx, C: Idx> fmt::Debug for BitMatrix<R, C> {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        /// Forces its contents to print in regular mode instead of alternate mode.
        struct OneLinePrinter<T>(T);
        impl<T: fmt::Debug> fmt::Debug for OneLinePrinter<T> {
            fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
                write!(fmt, "{:?}", self.0)
            }
        }

        write!(fmt, "BitMatrix({}x{}) ", self.num_rows, self.num_columns)?;
        let items = self.rows().flat_map(|r| self.iter(r).map(move |c| (r, c)));
        fmt.debug_set().entries(items.map(OneLinePrinter)).finish()
    }
}
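
// Illustrative reachability sketch (not part of the original file): a
// hypothetical test module using only the `BitMatrix` methods above. `insert`
// records direct edges; `union_rows(read, write)` then propagates everything
// reachable from `read` into `write`.
#[cfg(test)]
mod bit_matrix_example {
    use super::*;

    #[test]
    fn union_rows_propagates_reachability() {
        // 3 rows (sources) and 5 columns (targets).
        let mut matrix: BitMatrix<usize, usize> = BitMatrix::new(3, 5);
        assert!(matrix.insert(1, 4)); // row 1 reaches column 4
        assert!(matrix.insert(2, 0)); // row 2 reaches column 0

        // Given an edge `2 -> 1`, row 2 also reaches whatever row 1 reaches.
        assert!(matrix.union_rows(1, 2));
        assert!(matrix.contains(2, 0));
        assert!(matrix.contains(2, 4));
        assert_eq!(matrix.iter(2).collect::<Vec<_>>(), vec![0, 4]);
        assert_eq!(matrix.count(2), 2);
        assert_eq!(matrix.intersect_rows(1, 2), vec![4]);

        // Repeating the union changes nothing.
        assert!(!matrix.union_rows(1, 2));
    }
}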

/// A fixed-column-size, variable-row-size 2D bit matrix with a moderately
/// sparse representation.
///
/// Initially, every row has no explicit representation. If any bit within a row
/// is set, the entire row is instantiated as `Some(<DenseBitSet>)`.
/// Furthermore, any previously uninstantiated rows prior to it will be
/// instantiated as `None`. Those prior rows may themselves become fully
/// instantiated later on if any of their bits are set.
///
/// `R` and `C` are index types used to identify rows and columns respectively;
/// typically newtyped `usize` wrappers, but they can also just be `usize`.
#[derive(Clone, Debug)]
pub struct SparseBitMatrix<R, C>
where
    R: Idx,
    C: Idx,
{
    num_columns: usize,
    rows: IndexVec<R, Option<DenseBitSet<C>>>,
}

impl<R: Idx, C: Idx> SparseBitMatrix<R, C> {
    /// Creates a new, empty sparse bit matrix with `num_columns` columns and
    /// no instantiated rows.
    pub fn new(num_columns: usize) -> Self {
        Self { num_columns, rows: IndexVec::new() }
    }

    fn ensure_row(&mut self, row: R) -> &mut DenseBitSet<C> {
        // Instantiate any missing rows up to and including row `row` with `None`,
        // then replace the `None` at `row` with an empty `DenseBitSet` if necessary.
        self.rows.get_or_insert_with(row, || DenseBitSet::new_empty(self.num_columns))
    }

    /// Sets the cell at `(row, column)` to true. Put another way, insert
    /// `column` into the bitset for `row`.
    ///
    /// Returns `true` if this changed the matrix.
    pub fn insert(&mut self, row: R, column: C) -> bool {
        self.ensure_row(row).insert(column)
    }

    /// Sets the cell at `(row, column)` to false. Put another way, delete
    /// `column` from the bitset for `row`. Has no effect if `row` does not
    /// exist.
    ///
    /// Returns `true` if this changed the matrix.
    pub fn remove(&mut self, row: R, column: C) -> bool {
        match self.rows.get_mut(row) {
            Some(Some(row)) => row.remove(column),
            _ => false,
        }
    }

    /// Sets all columns at `row` to false. Has no effect if `row` does
    /// not exist.
    pub fn clear(&mut self, row: R) {
        if let Some(Some(row)) = self.rows.get_mut(row) {
            row.clear();
        }
    }

    /// Do the bits from `row` contain `column`? Put another way, is
    /// the matrix cell at `(row, column)` true?  Put yet another way,
    /// if the matrix represents (transitive) reachability, can
    /// `row` reach `column`?
    pub fn contains(&self, row: R, column: C) -> bool {
        self.row(row).is_some_and(|r| r.contains(column))
    }

    /// Adds the bits from row `read` to the bits from row `write`, and
    /// returns `true` if anything changed.
    ///
    /// This is used when computing transitive reachability: if there is an
    /// edge `write -> read`, then `write` can reach everything that `read`
    /// can (and potentially more).
    pub fn union_rows(&mut self, read: R, write: R) -> bool {
        if read == write || self.row(read).is_none() {
            return false;
        }

        self.ensure_row(write);
        if let (Some(read_row), Some(write_row)) = self.rows.pick2_mut(read, write) {
            write_row.union(read_row)
        } else {
            unreachable!()
        }
    }

    /// Insert all bits in the given row.
    pub fn insert_all_into_row(&mut self, row: R) {
        self.ensure_row(row).insert_all();
    }

    pub fn rows(&self) -> impl Iterator<Item = R> {
        self.rows.indices()
    }

    /// Iterates through all the columns set to true in a given row of
    /// the matrix.
    pub fn iter(&self, row: R) -> impl Iterator<Item = C> {
        self.row(row).into_iter().flat_map(|r| r.iter())
    }

    pub fn row(&self, row: R) -> Option<&DenseBitSet<C>> {
        self.rows.get(row)?.as_ref()
    }

    /// Intersects `row` with `set`. `set` can be either `DenseBitSet` or
    /// `ChunkedBitSet`. Has no effect if `row` does not exist.
    ///
    /// Returns `true` if the row was changed.
    pub fn intersect_row<Set>(&mut self, row: R, set: &Set) -> bool
    where
        DenseBitSet<C>: BitRelations<Set>,
    {
        match self.rows.get_mut(row) {
            Some(Some(row)) => row.intersect(set),
            _ => false,
        }
    }

    /// Subtracts `set` from `row`. `set` can be either `DenseBitSet` or
    /// `ChunkedBitSet`. Has no effect if `row` does not exist.
    ///
    /// Returns `true` if the row was changed.
    pub fn subtract_row<Set>(&mut self, row: R, set: &Set) -> bool
    where
        DenseBitSet<C>: BitRelations<Set>,
    {
        match self.rows.get_mut(row) {
            Some(Some(row)) => row.subtract(set),
            _ => false,
        }
    }

    /// Unions `row` with `set`. `set` can be either `DenseBitSet` or
    /// `ChunkedBitSet`.
    ///
    /// Returns `true` if the row was changed.
    pub fn union_row<Set>(&mut self, row: R, set: &Set) -> bool
    where
        DenseBitSet<C>: BitRelations<Set>,
    {
        self.ensure_row(row).union(set)
    }
}
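
// Illustrative sketch of the lazy row representation (not part of the original
// file): a hypothetical test module using only the `SparseBitMatrix` methods
// above. Rows get backing storage only once a bit in them is set, and `row`
// distinguishes "never instantiated" from "instantiated but empty".
#[cfg(test)]
mod sparse_bit_matrix_example {
    use super::*;

    #[test]
    fn rows_are_instantiated_lazily() {
        let mut matrix: SparseBitMatrix<usize, usize> = SparseBitMatrix::new(100);

        // No rows exist yet; queries on missing rows simply report `false`/`None`.
        assert!(!matrix.contains(3, 22));
        assert!(matrix.row(3).is_none());

        // Setting a bit instantiates row 3 (and `None` placeholders before it).
        assert!(matrix.insert(3, 22));
        assert!(matrix.contains(3, 22));
        assert!(matrix.row(3).is_some());

        // Clearing empties the row but keeps it instantiated.
        matrix.clear(3);
        assert!(!matrix.contains(3, 22));
        assert!(matrix.row(3).is_some());
    }
}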

#[inline]
fn num_words<T: Idx>(domain_size: T) -> usize {
    domain_size.index().div_ceil(WORD_BITS)
}

#[inline]
fn num_chunks<T: Idx>(domain_size: T) -> usize {
    assert!(domain_size.index() > 0);
    domain_size.index().div_ceil(CHUNK_BITS)
}

#[inline]
fn word_index_and_mask<T: Idx>(elem: T) -> (usize, Word) {
    let elem = elem.index();
    let word_index = elem / WORD_BITS;
    let mask = 1 << (elem % WORD_BITS);
    (word_index, mask)
}

#[inline]
fn chunk_index<T: Idx>(elem: T) -> usize {
    elem.index() / CHUNK_BITS
}

#[inline]
fn chunk_word_index_and_mask<T: Idx>(elem: T) -> (usize, Word) {
    let chunk_elem = elem.index() % CHUNK_BITS;
    word_index_and_mask(chunk_elem)
}

fn clear_excess_bits_in_final_word(domain_size: usize, words: &mut [Word]) {
    let num_bits_in_final_word = domain_size % WORD_BITS;
    if num_bits_in_final_word > 0 {
        let mask = (1 << num_bits_in_final_word) - 1;
        words[words.len() - 1] &= mask;
    }
}

#[inline]
fn max_bit(word: Word) -> usize {
    WORD_BITS - 1 - word.leading_zeros() as usize
}
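
// A few concrete checks of the word arithmetic above (an illustrative,
// hypothetical test module, not part of the original file); the values simply
// restate what `num_words`, `word_index_and_mask`,
// `clear_excess_bits_in_final_word`, and `max_bit` compute for 64-bit words.
#[cfg(test)]
mod word_helpers_example {
    use super::*;

    #[test]
    fn word_arithmetic() {
        // A 64-bit domain fits in one word; a 65-bit domain needs two.
        assert_eq!(num_words(64usize), 1);
        assert_eq!(num_words(65usize), 2);

        // Bit 70 lives in word 1, at bit position 6 within that word.
        assert_eq!(word_index_and_mask(70usize), (1, 1 << 6));

        // For a 10-bit domain, only the low 10 bits of the final word survive.
        let mut words = [!0u64];
        clear_excess_bits_in_final_word(10, &mut words);
        assert_eq!(words[0], (1 << 10) - 1);

        // The highest set bit of 0b1010 is bit 3.
        assert_eq!(max_bit(0b1010), 3);
    }
}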

/// Integral type used to represent the bit set.
pub trait FiniteBitSetTy:
    BitAnd<Output = Self>
    + BitAndAssign
    + BitOrAssign
    + Clone
    + Copy
    + Shl
    + Not<Output = Self>
    + PartialEq
    + Sized
{
    /// Size of the domain representable by this type, e.g. 64 for `u64`.
    const DOMAIN_SIZE: u32;

    /// Value which represents the `FiniteBitSet` having every bit set.
    const FILLED: Self;
    /// Value which represents the `FiniteBitSet` having no bits set.
    const EMPTY: Self;

    /// Value for one as the integral type.
    const ONE: Self;
    /// Value for zero as the integral type.
    const ZERO: Self;

    /// Perform a checked left shift on the integral type.
    fn checked_shl(self, rhs: u32) -> Option<Self>;
    /// Perform a checked right shift on the integral type.
    fn checked_shr(self, rhs: u32) -> Option<Self>;
}

impl FiniteBitSetTy for u32 {
    const DOMAIN_SIZE: u32 = 32;

    const FILLED: Self = Self::MAX;
    const EMPTY: Self = Self::MIN;

    const ONE: Self = 1u32;
    const ZERO: Self = 0u32;

    fn checked_shl(self, rhs: u32) -> Option<Self> {
        self.checked_shl(rhs)
    }

    fn checked_shr(self, rhs: u32) -> Option<Self> {
        self.checked_shr(rhs)
    }
}

impl std::fmt::Debug for FiniteBitSet<u32> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{:032b}", self.0)
    }
}

/// A fixed-size bitset type represented by an integer type. Indices outside
/// the range representable by `T` are considered set.
#[cfg_attr(feature = "nightly", derive(Decodable_NoContext, Encodable_NoContext))]
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct FiniteBitSet<T: FiniteBitSetTy>(pub T);

impl<T: FiniteBitSetTy> FiniteBitSet<T> {
    /// Creates a new, empty bitset.
    pub fn new_empty() -> Self {
        Self(T::EMPTY)
    }

    /// Sets the `index`th bit.
    pub fn set(&mut self, index: u32) {
        self.0 |= T::ONE.checked_shl(index).unwrap_or(T::ZERO);
    }

    /// Unsets the `index`th bit.
    pub fn clear(&mut self, index: u32) {
        self.0 &= !T::ONE.checked_shl(index).unwrap_or(T::ZERO);
    }

    /// Sets every bit in the half-open `range`.
    pub fn set_range(&mut self, range: Range<u32>) {
        let bits = T::FILLED
            .checked_shl(range.end - range.start)
            .unwrap_or(T::ZERO)
            .not()
            .checked_shl(range.start)
            .unwrap_or(T::ZERO);
        self.0 |= bits;
    }

    /// Is the set empty?
    pub fn is_empty(&self) -> bool {
        self.0 == T::EMPTY
    }

    /// Returns `true` if `index` is within the domain of the bitset.
    pub fn within_domain(&self, index: u32) -> bool {
        index < T::DOMAIN_SIZE
    }

    /// Returns whether the `index`th bit is set.
    pub fn contains(&self, index: u32) -> Option<bool> {
        self.within_domain(index)
            .then(|| ((self.0.checked_shr(index).unwrap_or(T::ONE)) & T::ONE) == T::ONE)
    }
}

impl<T: FiniteBitSetTy> Default for FiniteBitSet<T> {
    fn default() -> Self {
        Self::new_empty()
    }
}
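
// Illustrative sketch of `FiniteBitSet<u32>` (a hypothetical test module, not
// part of the original file): indices `0..32` form the valid domain, and
// `contains` reports `None` outside it.
#[cfg(test)]
mod finite_bit_set_example {
    use super::*;

    #[test]
    fn set_clear_and_ranges() {
        let mut set = FiniteBitSet::<u32>::new_empty();
        assert!(set.is_empty());

        // `set_range` sets a half-open range of bits: here bits 2, 3 and 4.
        set.set_range(2..5);
        assert_eq!(set.contains(2), Some(true));
        assert_eq!(set.contains(4), Some(true));
        assert_eq!(set.contains(5), Some(false));

        // Individual bits can be cleared again.
        set.clear(3);
        assert_eq!(set.contains(3), Some(false));

        // Index 32 falls outside the `u32` domain, so the query is `None`.
        assert_eq!(set.contains(32), None);
    }
}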