//! This is the core implementation that doesn't depend on the hasher at all.
//!
//! The methods of `IndexMapCore` don't use any Hash properties of K.
//!
//! It's cleaner to separate them out, then the compiler checks that we are not
//! using Hash at all in these methods.
//!
//! However, we should probably not let this show in the public API or docs.

mod entry;
mod extract;

pub mod raw_entry_v1;

use alloc::vec::{self, Vec};
use core::mem;
use core::ops::RangeBounds;
use hashbrown::hash_table;

use crate::util::simplify_range;
use crate::{Bucket, Equivalent, HashValue, TryReserveError};

type Indices = hash_table::HashTable<usize>;
type Entries<K, V> = Vec<Bucket<K, V>>;

pub use entry::{Entry, IndexedEntry, OccupiedEntry, VacantEntry};
pub(crate) use extract::ExtractCore;

/// Core of the map that does not depend on the hasher state `S`.
#[derive(Debug)]
pub(crate) struct IndexMapCore<K, V> {
    /// indices mapping from the entry hash to its index.
    indices: Indices,
    /// entries is a dense vec of entries in their order.
    entries: Entries<K, V>,
}

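// Invariant: `entries` is the dense, ordered storage, and `indices` is a
// hashbrown table whose elements are positions into `entries`. A lookup
// hashes the key, finds a matching position in `indices`, then reads the
// bucket at that position; a minimal sketch of the flow (see `get_index_of`
// below for the real thing):
//
//     let eq = equivalent(key, &self.entries);
//     self.indices.find(hash.get(), eq).copied()
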
#[inline(always)]
fn get_hash<K, V>(entries: &[Bucket<K, V>]) -> impl Fn(&usize) -> u64 + use<'_, K, V> {
    move |&i| entries[i].hash.get()
}

#[inline]
fn equivalent<'a, K, V, Q: ?Sized + Equivalent<K>>(
    key: &'a Q,
    entries: &'a [Bucket<K, V>],
) -> impl Fn(&usize) -> bool + use<'a, K, V, Q> {
    move |&i| Q::equivalent(key, &entries[i].key)
}

#[inline]
fn erase_index(table: &mut Indices, hash: HashValue, index: usize) {
    if let Ok(entry) = table.find_entry(hash.get(), move |&i| i == index) {
        entry.remove();
    } else if cfg!(debug_assertions) {
        panic!("index not found");
    }
}

#[inline]
fn update_index(table: &mut Indices, hash: HashValue, old: usize, new: usize) {
    let index = table
        .find_mut(hash.get(), move |&i| i == old)
        .expect("index not found");
    *index = new;
}

/// Insert hash-table indices for all `entries`, in order, without growing the table.
///
/// ***Panics*** if there is not sufficient capacity already.
fn insert_bulk_no_grow<K, V>(indices: &mut Indices, entries: &[Bucket<K, V>]) {
    assert!(indices.capacity() - indices.len() >= entries.len());
    for entry in entries {
        // Capacity was asserted above, so the grow callback is never called.
        indices.insert_unique(entry.hash.get(), indices.len(), |_| unreachable!());
    }
}

impl<K, V> Clone for IndexMapCore<K, V>
where
    K: Clone,
    V: Clone,
{
    fn clone(&self) -> Self {
        let mut new = Self::new();
        new.clone_from(self);
        new
    }

    fn clone_from(&mut self, other: &Self) {
        self.indices.clone_from(&other.indices);
        if self.entries.capacity() < other.entries.len() {
            // If we must resize, match the indices capacity.
            let additional = other.entries.len() - self.entries.len();
            self.reserve_entries(additional);
        }
        self.entries.clone_from(&other.entries);
    }
}

impl<K, V> IndexMapCore<K, V> {
    /// The maximum capacity before the `entries` allocation would exceed `isize::MAX`.
    const MAX_ENTRIES_CAPACITY: usize = (isize::MAX as usize) / size_of::<Bucket<K, V>>();

    #[inline]
    pub(crate) const fn new() -> Self {
        IndexMapCore {
            indices: Indices::new(),
            entries: Vec::new(),
        }
    }

    #[inline]
    pub(crate) fn with_capacity(n: usize) -> Self {
        IndexMapCore {
            indices: Indices::with_capacity(n),
            entries: Vec::with_capacity(n),
        }
    }

    #[inline]
    pub(crate) fn into_entries(self) -> Entries<K, V> {
        self.entries
    }

    #[inline]
    pub(crate) fn as_entries(&self) -> &[Bucket<K, V>] {
        &self.entries
    }

    #[inline]
    pub(crate) fn as_entries_mut(&mut self) -> &mut [Bucket<K, V>] {
        &mut self.entries
    }

    pub(crate) fn with_entries<F>(&mut self, f: F)
    where
        F: FnOnce(&mut [Bucket<K, V>]),
    {
        f(&mut self.entries);
        self.rebuild_hash_table();
    }

    #[inline]
    pub(crate) fn len(&self) -> usize {
        debug_assert_eq!(self.entries.len(), self.indices.len());
        self.indices.len()
    }

    #[inline]
    pub(crate) fn capacity(&self) -> usize {
        Ord::min(self.indices.capacity(), self.entries.capacity())
    }

    pub(crate) fn clear(&mut self) {
        self.indices.clear();
        self.entries.clear();
    }

    pub(crate) fn truncate(&mut self, len: usize) {
        if len < self.len() {
            self.erase_indices(len, self.entries.len());
            self.entries.truncate(len);
        }
    }

    #[track_caller]
    pub(crate) fn drain<R>(&mut self, range: R) -> vec::Drain<'_, Bucket<K, V>>
    where
        R: RangeBounds<usize>,
    {
        let range = simplify_range(range, self.entries.len());
        self.erase_indices(range.start, range.end);
        self.entries.drain(range)
    }

    #[cfg(feature = "rayon")]
    pub(crate) fn par_drain<R>(&mut self, range: R) -> rayon::vec::Drain<'_, Bucket<K, V>>
    where
        K: Send,
        V: Send,
        R: RangeBounds<usize>,
    {
        use rayon::iter::ParallelDrainRange;
        let range = simplify_range(range, self.entries.len());
        self.erase_indices(range.start, range.end);
        self.entries.par_drain(range)
    }

    #[track_caller]
    pub(crate) fn split_off(&mut self, at: usize) -> Self {
        let len = self.entries.len();
        assert!(
            at <= len,
            "index out of bounds: the len is {len} but the index is {at}. Expected index <= len"
        );

        self.erase_indices(at, self.entries.len());
        let entries = self.entries.split_off(at);

        let mut indices = Indices::with_capacity(entries.len());
        insert_bulk_no_grow(&mut indices, &entries);
        Self { indices, entries }
    }

    #[track_caller]
    pub(crate) fn split_splice<R>(&mut self, range: R) -> (Self, vec::IntoIter<Bucket<K, V>>)
    where
        R: RangeBounds<usize>,
    {
        let range = simplify_range(range, self.len());
        self.erase_indices(range.start, self.entries.len());
        let entries = self.entries.split_off(range.end);
        let drained = self.entries.split_off(range.start);

        let mut indices = Indices::with_capacity(entries.len());
        insert_bulk_no_grow(&mut indices, &entries);
        (Self { indices, entries }, drained.into_iter())
    }

    /// Append from another map without checking whether items already exist.
    pub(crate) fn append_unchecked(&mut self, other: &mut Self) {
        self.reserve(other.len());
        insert_bulk_no_grow(&mut self.indices, &other.entries);
        self.entries.append(&mut other.entries);
        other.indices.clear();
    }

    /// Reserve capacity for `additional` more key-value pairs.
    pub(crate) fn reserve(&mut self, additional: usize) {
        self.indices.reserve(additional, get_hash(&self.entries));
        // Only grow entries if necessary, since we also round up capacity.
        if additional > self.entries.capacity() - self.entries.len() {
            self.reserve_entries(additional);
        }
    }

    /// Reserve capacity for `additional` more key-value pairs, without over-allocating.
    pub(crate) fn reserve_exact(&mut self, additional: usize) {
        self.indices.reserve(additional, get_hash(&self.entries));
        self.entries.reserve_exact(additional);
    }

    /// Try to reserve capacity for `additional` more key-value pairs.
    pub(crate) fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.indices
            .try_reserve(additional, get_hash(&self.entries))
            .map_err(TryReserveError::from_hashbrown)?;
        // Only grow entries if necessary, since we also round up capacity.
        if additional > self.entries.capacity() - self.entries.len() {
            self.try_reserve_entries(additional)
        } else {
            Ok(())
        }
    }

    /// Try to reserve entries capacity, rounded up to match the indices.
    fn try_reserve_entries(&mut self, additional: usize) -> Result<(), TryReserveError> {
        // Use a soft-limit on the maximum capacity, but if the caller explicitly
        // requested more, do it and let them have the resulting error.
        let new_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY);
        let try_add = new_capacity - self.entries.len();
        if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() {
            return Ok(());
        }
        self.entries
            .try_reserve_exact(additional)
            .map_err(TryReserveError::from_alloc)
    }

    /// Try to reserve capacity for `additional` more key-value pairs, without over-allocating.
    pub(crate) fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.indices
            .try_reserve(additional, get_hash(&self.entries))
            .map_err(TryReserveError::from_hashbrown)?;
        self.entries
            .try_reserve_exact(additional)
            .map_err(TryReserveError::from_alloc)
    }

    /// Shrink the capacity of the map with a lower bound.
    pub(crate) fn shrink_to(&mut self, min_capacity: usize) {
        self.indices
            .shrink_to(min_capacity, get_hash(&self.entries));
        self.entries.shrink_to(min_capacity);
    }

    /// Remove the last key-value pair.
    pub(crate) fn pop(&mut self) -> Option<(K, V)> {
        if let Some(entry) = self.entries.pop() {
            let last = self.entries.len();
            erase_index(&mut self.indices, entry.hash, last);
            Some((entry.key, entry.value))
        } else {
            None
        }
    }

    /// Return the index in `entries` where an equivalent key can be found.
    pub(crate) fn get_index_of<Q>(&self, hash: HashValue, key: &Q) -> Option<usize>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        self.indices.find(hash.get(), eq).copied()
    }

    /// Append a key-value pair to `entries`,
    /// *without* checking whether it already exists.
    fn push_entry(&mut self, hash: HashValue, key: K, value: V) {
        if self.entries.len() == self.entries.capacity() {
            // Reserve our own capacity synced to the indices,
            // rather than letting `Vec::push` just double it.
            self.reserve_entries(1);
        }
        self.entries.push(Bucket { hash, key, value });
    }

    pub(crate) fn insert_full(&mut self, hash: HashValue, key: K, value: V) -> (usize, Option<V>)
    where
        K: Eq,
    {
        let eq = equivalent(&key, &self.entries);
        let hasher = get_hash(&self.entries);
        match self.indices.entry(hash.get(), eq, hasher) {
            hash_table::Entry::Occupied(entry) => {
                let i = *entry.get();
                (i, Some(mem::replace(&mut self.entries[i].value, value)))
            }
            hash_table::Entry::Vacant(entry) => {
                let i = self.entries.len();
                entry.insert(i);
                self.push_entry(hash, key, value);
                debug_assert_eq!(self.indices.len(), self.entries.len());
                (i, None)
            }
        }
    }
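
    // How a hasher-aware wrapper is expected to drive `insert_full` (an
    // illustrative sketch, not part of this file's API; `hasher` stands in
    // for whatever `BuildHasher` the wrapping map owns):
    //
    //     let hash = HashValue(hasher.hash_one(&key) as usize);
    //     let (index, old_value) = core.insert_full(hash, key, value);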

    /// Insert a key-value pair, replacing an equivalent key if it exists,
    /// and return its index along with the previous key-value pair.
    pub(crate) fn replace_full(
        &mut self,
        hash: HashValue,
        key: K,
        value: V,
    ) -> (usize, Option<(K, V)>)
    where
        K: Eq,
    {
        let eq = equivalent(&key, &self.entries);
        let hasher = get_hash(&self.entries);
        match self.indices.entry(hash.get(), eq, hasher) {
            hash_table::Entry::Occupied(entry) => {
                let i = *entry.get();
                let entry = &mut self.entries[i];
                let kv = (
                    mem::replace(&mut entry.key, key),
                    mem::replace(&mut entry.value, value),
                );
                (i, Some(kv))
            }
            hash_table::Entry::Vacant(entry) => {
                let i = self.entries.len();
                entry.insert(i);
                self.push_entry(hash, key, value);
                debug_assert_eq!(self.indices.len(), self.entries.len());
                (i, None)
            }
        }
    }

    /// Remove an entry by shifting all entries that follow it.
    pub(crate) fn shift_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        let (index, _) = self.indices.find_entry(hash.get(), eq).ok()?.remove();
        let (key, value) = self.shift_remove_finish(index);
        Some((index, key, value))
    }

    /// Remove an entry by swapping it with the last.
    pub(crate) fn swap_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        let (index, _) = self.indices.find_entry(hash.get(), eq).ok()?.remove();
        let (key, value) = self.swap_remove_finish(index);
        Some((index, key, value))
    }

    /// Erase `start..end` from `indices`, and shift `end..` indices down to `start..`
    ///
    /// All of these items should still be at their original location in `entries`.
    /// This is used by `drain`, which will let `Vec::drain` do the work on `entries`.
    fn erase_indices(&mut self, start: usize, end: usize) {
        let (init, shifted_entries) = self.entries.split_at(end);
        let (start_entries, erased_entries) = init.split_at(start);

        let erased = erased_entries.len();
        let shifted = shifted_entries.len();
        let half_capacity = self.indices.capacity() / 2;

        // Use a heuristic between different strategies.
        if erased == 0 {
            // Degenerate case, nothing to do.
        } else if start + shifted < half_capacity && start < erased {
            // Reinsert everything, as there are few kept indices.
            self.indices.clear();

            // Reinsert stable indices, then shifted indices.
            insert_bulk_no_grow(&mut self.indices, start_entries);
            insert_bulk_no_grow(&mut self.indices, shifted_entries);
        } else if erased + shifted < half_capacity {
            // Find each affected index, as there are few to adjust.

            // Find erased indices.
            for (i, entry) in (start..).zip(erased_entries) {
                erase_index(&mut self.indices, entry.hash, i);
            }

            // Find shifted indices.
            for ((new, old), entry) in (start..).zip(end..).zip(shifted_entries) {
                update_index(&mut self.indices, entry.hash, old, new);
            }
        } else {
            // Sweep the whole table for adjustments.
            let offset = end - start;
            self.indices.retain(move |i| {
                if *i >= end {
                    *i -= offset;
                    true
                } else {
                    *i < start
                }
            });
        }

        debug_assert_eq!(self.indices.len(), start + shifted);
    }

    pub(crate) fn retain_in_order<F>(&mut self, mut keep: F)
    where
        F: FnMut(&mut K, &mut V) -> bool,
    {
        self.entries
            .retain_mut(|entry| keep(&mut entry.key, &mut entry.value));
        if self.entries.len() < self.indices.len() {
            self.rebuild_hash_table();
        }
    }

    fn rebuild_hash_table(&mut self) {
        self.indices.clear();
        insert_bulk_no_grow(&mut self.indices, &self.entries);
    }

    pub(crate) fn reverse(&mut self) {
        self.entries.reverse();

        // No need to save hash indices, can easily calculate what they should
        // be, given that this is an in-place reversal.
        let len = self.entries.len();
        for i in &mut self.indices {
            *i = len - *i - 1;
        }
    }

    /// Reserve entries capacity, rounded up to match the indices.
    #[inline]
    fn reserve_entries(&mut self, additional: usize) {
        // Use a soft-limit on the maximum capacity, but if the caller explicitly
        // requested more, do it and let them have the resulting panic.
        let try_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY);
        let try_add = try_capacity - self.entries.len();
        if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() {
            return;
        }
        self.entries.reserve_exact(additional);
    }

    /// Append a key-value pair to the map,
    /// *without* checking whether the key already exists.
    pub(super) fn insert_unique(&mut self, hash: HashValue, key: K, value: V) -> &mut Bucket<K, V> {
        let i = self.indices.len();
        debug_assert_eq!(i, self.entries.len());
        self.indices
            .insert_unique(hash.get(), i, get_hash(&self.entries));
        self.push_entry(hash, key, value);
        &mut self.entries[i]
    }

    /// Replace the key and hash at the given index,
    /// *without* checking whether the new key already exists.
    #[track_caller]
    pub(crate) fn replace_index_unique(&mut self, index: usize, hash: HashValue, key: K) -> K {
        // NB: This removal and insertion are balanced, so the `indices` capacity is unchanged.
        erase_index(&mut self.indices, self.entries[index].hash, index);
        self.indices
            .insert_unique(hash.get(), index, get_hash(&self.entries));

        let entry = &mut self.entries[index];
        entry.hash = hash;
        mem::replace(&mut entry.key, key)
    }

    /// Insert a key-value pair in `entries` at a particular index,
    /// *without* checking whether it already exists.
    fn shift_insert_unique(&mut self, index: usize, hash: HashValue, key: K, value: V) {
        let end = self.indices.len();
        assert!(index <= end);
        // Increment others first so we don't have duplicate indices.
        self.increment_indices(index, end);
        let entries = &*self.entries;
        self.indices.insert_unique(hash.get(), index, move |&i| {
            // Adjust for the incremented indices to find hashes.
            debug_assert_ne!(i, index);
            let i = if i < index { i } else { i - 1 };
            entries[i].hash.get()
        });
        if self.entries.len() == self.entries.capacity() {
            // Reserve our own capacity synced to the indices,
            // rather than letting `Vec::insert` just double it.
            self.reserve_entries(1);
        }
        self.entries.insert(index, Bucket { hash, key, value });
    }

    /// Remove an entry by shifting all entries that follow it.
    pub(crate) fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(&mut self.indices, entry.hash, index);
                Some(self.shift_remove_finish(index))
            }
            None => None,
        }
    }

    /// Finish removing an entry by shifting all entries that follow it.
    ///
    /// The index should already be removed from `self.indices`.
    fn shift_remove_finish(&mut self, index: usize) -> (K, V) {
        // Correct indices that point to the entries that followed the removed entry.
        self.decrement_indices(index + 1, self.entries.len());

        // Use Vec::remove to actually remove the entry.
        let entry = self.entries.remove(index);
        (entry.key, entry.value)
    }

    /// Remove an entry by swapping it with the last.
    pub(crate) fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(&mut self.indices, entry.hash, index);
                Some(self.swap_remove_finish(index))
            }
            None => None,
        }
    }

    /// Finish removing an entry by swapping it with the last.
    ///
    /// The index should already be removed from `self.indices`.
    fn swap_remove_finish(&mut self, index: usize) -> (K, V) {
        // Use swap_remove, but then we need to update the index that points
        // to the other entry that has to move.
        let entry = self.entries.swap_remove(index);

        // Correct the index that points to the entry that had to swap places.
        if let Some(entry) = self.entries.get(index) {
            // It was not the last element; find the swapped entry in `indices`.
            let last = self.entries.len();
            update_index(&mut self.indices, entry.hash, last, index);
        }

        (entry.key, entry.value)
    }

    /// Decrement all indices in the range `start..end`.
    ///
    /// The index `start - 1` should not exist in `self.indices`.
    /// All entries should still be in their original positions.
    fn decrement_indices(&mut self, start: usize, end: usize) {
        // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
        let shifted_entries = &self.entries[start..end];
        if shifted_entries.len() > self.indices.capacity() / 2 {
            // Shift all indices in range.
            for i in &mut self.indices {
                if start <= *i && *i < end {
                    *i -= 1;
                }
            }
        } else {
            // Find each entry in range to shift its index.
            for (i, entry) in (start..end).zip(shifted_entries) {
                update_index(&mut self.indices, entry.hash, i, i - 1);
            }
        }
    }

    /// Increment all indices in the range `start..end`.
    ///
    /// The index `end` should not exist in `self.indices`.
    /// All entries should still be in their original positions.
    fn increment_indices(&mut self, start: usize, end: usize) {
        // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
        let shifted_entries = &self.entries[start..end];
        if shifted_entries.len() > self.indices.capacity() / 2 {
            // Shift all indices in range.
            for i in &mut self.indices {
                if start <= *i && *i < end {
                    *i += 1;
                }
            }
        } else {
            // Find each entry in range to shift its index, in reverse order
            // so we never produce a transient duplicate index.
            for (i, entry) in (start..end).zip(shifted_entries).rev() {
                update_index(&mut self.indices, entry.hash, i, i + 1);
            }
        }
    }

    #[track_caller]
    pub(super) fn move_index(&mut self, from: usize, to: usize) {
        let from_hash = self.entries[from].hash;
        if from != to {
            let _ = self.entries[to]; // explicit bounds check
            // Find the moved entry's table slot before any indices shift, so its
            // final position can be written directly afterward without colliding
            // with the shifted indices.
            let bucket = self
                .indices
                .find_bucket_index(from_hash.get(), move |&i| i == from)
                .expect("index not found");

            self.move_index_inner(from, to);
            *self.indices.get_bucket_mut(bucket).unwrap() = to;
        }
    }

    fn move_index_inner(&mut self, from: usize, to: usize) {
        if from < to {
            self.decrement_indices(from + 1, to + 1);
            self.entries[from..=to].rotate_left(1);
        } else if to < from {
            self.increment_indices(to, from);
            self.entries[to..=from].rotate_right(1);
        }
    }

    #[track_caller]
    pub(crate) fn swap_indices(&mut self, a: usize, b: usize) {
        // If they're equal and in-bounds, there's nothing to do.
        if a == b && a < self.entries.len() {
            return;
        }

        // The indexing below ensures both are in-bounds when unequal.
        match self.indices.get_disjoint_mut(
            [self.entries[a].hash.get(), self.entries[b].hash.get()],
            move |i, &x| if i == 0 { x == a } else { x == b },
        ) {
            [Some(ref_a), Some(ref_b)] => {
                mem::swap(ref_a, ref_b);
                self.entries.swap(a, b);
            }
            _ => panic!("indices not found"),
        }
    }
}

#[test]
fn assert_send_sync() {
    fn assert_send_sync<T: Send + Sync>() {}
    assert_send_sync::<IndexMapCore<i32, i32>>();
    assert_send_sync::<Entry<'_, i32, i32>>();
    assert_send_sync::<IndexedEntry<'_, i32, i32>>();
    assert_send_sync::<raw_entry_v1::RawEntryMut<'_, i32, i32, ()>>();
}
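
// A smoke test of the core invariant that `indices` and `entries` stay in
// sync across inserts and removals. This is an illustrative sketch added
// alongside the existing test, assuming `HashValue`'s tuple field is visible
// to this module as it is elsewhere in the crate; the hash values are
// hand-made because `IndexMapCore` itself never hashes keys.
#[test]
fn insert_full_pop_stay_synced() {
    let mut map = IndexMapCore::<i32, i32>::new();
    for i in 0..10 {
        let (index, old) = map.insert_full(HashValue(i), i as i32, i as i32 * 10);
        assert_eq!((index, old), (i, None));
    }
    assert_eq!(map.len(), 10);
    // Popping removes both the entry and its hash-table index.
    assert_eq!(map.pop(), Some((9, 90)));
    assert_eq!(map.get_index_of(HashValue(9), &9), None);
    assert_eq!(map.len(), 9);
}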