// indexmap/map/core/extract.rs

1#![allow(unsafe_code)]
2
3use super::{Bucket, IndexMapCore};
4use crate::util::simplify_range;
5
6use core::ops::RangeBounds;
7
impl<K, V> IndexMapCore<K, V> {
    /// Begin extracting entries from `range`, returning a guard that keeps the
    /// map in a sound (if possibly lossy) state even if the caller panics.
    ///
    /// The `entries` length is pre-truncated to `range.start` so that if the
    /// returned `ExtractCore` is leaked or a predicate panics mid-extraction,
    /// the worst outcome is leaking the in-range items — never a double drop
    /// or exposure of moved-out slots. `ExtractCore`'s `Drop` restores the
    /// kept items and a consistent length.
    ///
    /// # Panics
    /// Panics if `range` is out of bounds or inverted (via `simplify_range`),
    /// or if `entries` and `indices` have inconsistent lengths.
    #[track_caller]
    pub(crate) fn extract<R>(&mut self, range: R) -> ExtractCore<'_, K, V>
    where
        R: RangeBounds<usize>,
    {
        let range = simplify_range(range, self.entries.len());

        // SAFETY: We must have consistent lengths to start, so that's a hard assertion.
        // Then the worst `set_len` can do is leak items if `ExtractCore` doesn't drop.
        assert_eq!(self.entries.len(), self.indices.len());
        unsafe {
            self.entries.set_len(range.start);
        }
        ExtractCore {
            map: self,
            new_len: range.start,
            current: range.start,
            end: range.end,
        }
    }
}
30
/// Guard for an in-progress range extraction from an `IndexMapCore`.
///
/// Invariant maintained while this exists: `map.entries.len() == new_len`
/// (set by `IndexMapCore::extract`), with `entries[..new_len]` valid kept
/// items, `entries[new_len..current]` moved out (extracted), and items from
/// `current` up to the original length still valid but hidden from the `Vec`.
/// `Drop` shifts the remaining tail down and restores a consistent length.
pub(crate) struct ExtractCore<'a, K, V> {
    // The map being drained; its `entries` length is temporarily truncated.
    map: &'a mut IndexMapCore<K, V>,
    // Count of leading entries confirmed kept so far.
    new_len: usize,
    // Cursor for the next entry to consider; `new_len <= current <= end`.
    current: usize,
    // Exclusive end of the range being extracted.
    end: usize,
}
37
impl<K, V> Drop for ExtractCore<'_, K, V> {
    /// Restore the map to a consistent state: close the gap left by extracted
    /// items (`entries[new_len..current]`), fix the vector length, and rebuild
    /// the hash indices if anything was removed.
    fn drop(&mut self) {
        // `indices` was untouched during extraction, so its length is the
        // original entry count from before `extract` truncated `entries`.
        let old_len = self.map.indices.len();
        let mut new_len = self.new_len;

        debug_assert!(new_len <= self.current);
        debug_assert!(self.current <= self.end);
        debug_assert!(self.current <= old_len);
        debug_assert!(old_len <= self.map.entries.capacity());

        // SAFETY: We assume `new_len` and `current` were correctly maintained by the iterator.
        // So `entries[new_len..current]` were extracted, but the rest before and after are valid.
        unsafe {
            if new_len == self.current {
                // Nothing was extracted, so any remaining items can be left in place.
                new_len = old_len;
            } else if self.current < old_len {
                // Need to shift the remaining items down.
                let tail_len = old_len - self.current;
                let base = self.map.entries.as_mut_ptr();
                let src = base.add(self.current);
                let dest = base.add(new_len);
                // `copy_to` permits overlap (like `memmove`), which can occur
                // when the gap is smaller than the tail being shifted.
                src.copy_to(dest, tail_len);
                new_len += tail_len;
            }
            self.map.entries.set_len(new_len);
        }

        if new_len != old_len {
            // We don't keep track of *which* items were extracted, so reindex everything.
            self.map.rebuild_hash_table();
        }
    }
}
72
impl<K, V> ExtractCore<'_, K, V> {
    /// Advance through the remaining range, returning the next entry for which
    /// `pred` returns `true` (moved out by `ptr::read`), or `None` when the
    /// range is exhausted. Entries the predicate rejects are kept, shifted
    /// down to fill any gap left by earlier extractions.
    ///
    /// NOTE: if `pred` panics, the entry it was inspecting remains at
    /// `current` (neither kept nor extracted) and is handled by `Drop`,
    /// which treats `entries[current..]` as still valid.
    pub(crate) fn extract_if<F>(&mut self, mut pred: F) -> Option<Bucket<K, V>>
    where
        F: FnMut(&mut Bucket<K, V>) -> bool,
    {
        // `end` was clamped to the original length, which `Drop` asserts is
        // within capacity, so pointer offsets below stay in the allocation.
        debug_assert!(self.end <= self.map.entries.capacity());

        let base = self.map.entries.as_mut_ptr();
        while self.current < self.end {
            // SAFETY: We're maintaining both indices within bounds of the original entries, so
            // 0..new_len and current..indices.len() are always valid items for our Drop to keep.
            unsafe {
                let item = base.add(self.current);
                if pred(&mut *item) {
                    // Extract it!
                    // Bump `current` first so `Drop` no longer considers this
                    // slot live; `read` moves the value out bitwise.
                    self.current += 1;
                    return Some(item.read());
                } else {
                    // Keep it, shifting it down if needed.
                    if self.new_len != self.current {
                        debug_assert!(self.new_len < self.current);
                        let dest = base.add(self.new_len);
                        // Source and destination are distinct slots here
                        // (`new_len < current`), so nonoverlapping is sound.
                        item.copy_to_nonoverlapping(dest, 1);
                    }
                    self.current += 1;
                    self.new_len += 1;
                }
            }
        }
        None
    }

    /// Number of entries in the range not yet visited by `extract_if`.
    pub(crate) fn remaining(&self) -> usize {
        self.end - self.current
    }
}