Thanks for visiting codestin.com
Credit goes to llvm.org

LLVM 22.0.0git
DenseMap.h
Go to the documentation of this file.
1//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file defines the DenseMap class.
11///
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_ADT_DENSEMAP_H
15#define LLVM_ADT_DENSEMAP_H
16
17#include "llvm/ADT/ADL.h"
20#include "llvm/ADT/STLExtras.h"
28#include <algorithm>
29#include <cassert>
30#include <cstddef>
31#include <cstring>
32#include <initializer_list>
33#include <iterator>
34#include <new>
35#include <type_traits>
36#include <utility>
37
38namespace llvm {
39
40namespace detail {
41
// We extend a pair to allow users to override the bucket type with their own
// implementation without requiring two members.
template <typename KeyT, typename ValueT>
struct DenseMapPair : public std::pair<KeyT, ValueT> {
  using PairT = std::pair<KeyT, ValueT>;
  using PairT::pair; // Inherit all of std::pair's constructors.

  // Named accessors so custom bucket types can expose the same interface
  // without necessarily deriving from std::pair.
  KeyT &getFirst() { return this->first; }
  const KeyT &getFirst() const { return this->first; }
  ValueT &getSecond() { return this->second; }
  const ValueT &getSecond() const { return this->second; }
};
53
54} // end namespace detail
55
56template <typename KeyT, typename ValueT,
57 typename KeyInfoT = DenseMapInfo<KeyT>,
59 bool IsConst = false>
60class DenseMapIterator;
61
62template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
63 typename BucketT>
65 template <typename T>
66 using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;
67
68public:
70 using key_type = KeyT;
71 using mapped_type = ValueT;
72 using value_type = BucketT;
73
77
78 [[nodiscard]] inline iterator begin() {
79 return iterator::makeBegin(buckets(), empty(), *this);
80 }
81 [[nodiscard]] inline iterator end() {
82 return iterator::makeEnd(buckets(), *this);
83 }
84 [[nodiscard]] inline const_iterator begin() const {
85 return const_iterator::makeBegin(buckets(), empty(), *this);
86 }
87 [[nodiscard]] inline const_iterator end() const {
88 return const_iterator::makeEnd(buckets(), *this);
89 }
90
91 // Return an iterator to iterate over keys in the map.
92 [[nodiscard]] inline auto keys() {
93 return map_range(*this, [](const BucketT &P) { return P.getFirst(); });
94 }
95
96 // Return an iterator to iterate over values in the map.
97 [[nodiscard]] inline auto values() {
98 return map_range(*this, [](const BucketT &P) { return P.getSecond(); });
99 }
100
101 [[nodiscard]] inline auto keys() const {
102 return map_range(*this, [](const BucketT &P) { return P.getFirst(); });
103 }
104
105 [[nodiscard]] inline auto values() const {
106 return map_range(*this, [](const BucketT &P) { return P.getSecond(); });
107 }
108
109 [[nodiscard]] bool empty() const { return getNumEntries() == 0; }
110 [[nodiscard]] unsigned size() const { return getNumEntries(); }
111
  /// Grow the densemap so that it can contain at least \p NumEntries items
  /// before resizing again.
  void reserve(size_type NumEntries) {
    // Translate the entry count into the bucket count that keeps the load
    // factor under control (see getMinBucketToReserveForEntries).
    auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
    // reserve() only ever grows; it never shrinks the table.
    if (NumBuckets > getNumBuckets())
      grow(NumBuckets);
  }
120
  /// Remove all entries. If the table is large but sparsely populated, the
  /// bucket array is reallocated at a smaller size; otherwise the buckets are
  /// reset in place.
  void clear() {
    // Fast path: no live entries and no tombstones means every bucket
    // already holds the empty key.
    if (getNumEntries() == 0 && getNumTombstones() == 0)
      return;

    // If the capacity of the array is huge, and the # elements used is small,
    // shrink the array.
    if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
      shrink_and_clear();
      return;
    }

    const KeyT EmptyKey = getEmptyKey();
    if constexpr (std::is_trivially_destructible_v<ValueT>) {
      // Use a simpler loop when values don't need destruction.
      for (BucketT &B : buckets())
        B.getFirst() = EmptyKey;
    } else {
      const KeyT TombstoneKey = getTombstoneKey();
      unsigned NumEntries = getNumEntries();
      for (BucketT &B : buckets()) {
        if (!KeyInfoT::isEqual(B.getFirst(), EmptyKey)) {
          if (!KeyInfoT::isEqual(B.getFirst(), TombstoneKey)) {
            // Live entry: destroy the value and count it off against the
            // recorded entry count.
            B.getSecond().~ValueT();
            --NumEntries;
          }
          // Both live buckets and tombstones become empty.
          B.getFirst() = EmptyKey;
        }
      }
      assert(NumEntries == 0 && "Node count imbalance!");
      (void)NumEntries;
    }
    setNumEntries(0);
    setNumTombstones(0);
  }
156
157 /// Return true if the specified key is in the map, false otherwise.
158 [[nodiscard]] bool contains(const_arg_type_t<KeyT> Val) const {
159 return doFind(Val) != nullptr;
160 }
161
162 /// Return 1 if the specified key is in the map, 0 otherwise.
163 [[nodiscard]] size_type count(const_arg_type_t<KeyT> Val) const {
164 return contains(Val) ? 1 : 0;
165 }
166
167 [[nodiscard]] iterator find(const_arg_type_t<KeyT> Val) {
168 return find_as(Val);
169 }
170 [[nodiscard]] const_iterator find(const_arg_type_t<KeyT> Val) const {
171 return find_as(Val);
172 }
173
174 /// Alternate version of find() which allows a different, and possibly
175 /// less expensive, key type.
176 /// The DenseMapInfo is responsible for supplying methods
177 /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
178 /// type used.
179 template <class LookupKeyT>
180 [[nodiscard]] iterator find_as(const LookupKeyT &Val) {
181 if (BucketT *Bucket = doFind(Val))
182 return makeIterator(Bucket);
183 return end();
184 }
185 template <class LookupKeyT>
186 [[nodiscard]] const_iterator find_as(const LookupKeyT &Val) const {
187 if (const BucketT *Bucket = doFind(Val))
188 return makeConstIterator(Bucket);
189 return end();
190 }
191
  /// lookup - Return the entry for the specified key, or a default
  /// constructed value if no such entry exists.
  [[nodiscard]] ValueT lookup(const_arg_type_t<KeyT> Val) const {
    if (const BucketT *Bucket = doFind(Val))
      return Bucket->getSecond();
    // Key absent: return a value-initialized ValueT by value. Requires
    // ValueT to be default-constructible; use lookup_or otherwise.
    return ValueT();
  }
199
200 // Return the entry with the specified key, or \p Default. This variant is
201 // useful, because `lookup` cannot be used with non-default-constructible
202 // values.
203 template <typename U = std::remove_cv_t<ValueT>>
204 [[nodiscard]] ValueT lookup_or(const_arg_type_t<KeyT> Val,
205 U &&Default) const {
206 if (const BucketT *Bucket = doFind(Val))
207 return Bucket->getSecond();
208 return Default;
209 }
210
211 /// at - Return the entry for the specified key, or abort if no such
212 /// entry exists.
213 [[nodiscard]] const ValueT &at(const_arg_type_t<KeyT> Val) const {
214 auto Iter = this->find(std::move(Val));
215 assert(Iter != this->end() && "DenseMap::at failed due to a missing key");
216 return Iter->second;
217 }
218
219 // Inserts key,value pair into the map if the key isn't already in the map.
220 // If the key is already in the map, it returns false and doesn't update the
221 // value.
222 std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
223 return try_emplace_impl(KV.first, KV.second);
224 }
225
226 // Inserts key,value pair into the map if the key isn't already in the map.
227 // If the key is already in the map, it returns false and doesn't update the
228 // value.
229 std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
230 return try_emplace_impl(std::move(KV.first), std::move(KV.second));
231 }
232
233 // Inserts key,value pair into the map if the key isn't already in the map.
234 // The value is constructed in-place if the key is not in the map, otherwise
235 // it is not moved.
236 template <typename... Ts>
237 std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&...Args) {
238 return try_emplace_impl(std::move(Key), std::forward<Ts>(Args)...);
239 }
240
241 // Inserts key,value pair into the map if the key isn't already in the map.
242 // The value is constructed in-place if the key is not in the map, otherwise
243 // it is not moved.
244 template <typename... Ts>
245 std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&...Args) {
246 return try_emplace_impl(Key, std::forward<Ts>(Args)...);
247 }
248
  /// Alternate version of insert() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template <typename LookupKeyT>
  std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
                                      const LookupKeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return {makeIterator(TheBucket), false}; // Already in map.

    // Otherwise, insert the new element. Note \p Val is only used to locate
    // the bucket; the stored key/value are moved out of \p KV. The bucket's
    // key slot already holds a constructed empty/tombstone key, so it is
    // assigned over, while the value slot is raw storage and placement-new'd.
    TheBucket = findBucketForInsertion(Val, TheBucket);
    TheBucket->getFirst() = std::move(KV.first);
    ::new (&TheBucket->getSecond()) ValueT(std::move(KV.second));
    return {makeIterator(TheBucket), true};
  }
267
268 /// insert - Range insertion of pairs.
269 template <typename InputIt> void insert(InputIt I, InputIt E) {
270 for (; I != E; ++I)
271 insert(*I);
272 }
273
274 /// Inserts range of 'std::pair<KeyT, ValueT>' values into the map.
275 template <typename Range> void insert_range(Range &&R) {
276 insert(adl_begin(R), adl_end(R));
277 }
278
279 template <typename V>
280 std::pair<iterator, bool> insert_or_assign(const KeyT &Key, V &&Val) {
281 auto Ret = try_emplace(Key, std::forward<V>(Val));
282 if (!Ret.second)
283 Ret.first->second = std::forward<V>(Val);
284 return Ret;
285 }
286
287 template <typename V>
288 std::pair<iterator, bool> insert_or_assign(KeyT &&Key, V &&Val) {
289 auto Ret = try_emplace(std::move(Key), std::forward<V>(Val));
290 if (!Ret.second)
291 Ret.first->second = std::forward<V>(Val);
292 return Ret;
293 }
294
295 template <typename... Ts>
296 std::pair<iterator, bool> emplace_or_assign(const KeyT &Key, Ts &&...Args) {
297 auto Ret = try_emplace(Key, std::forward<Ts>(Args)...);
298 if (!Ret.second)
299 Ret.first->second = ValueT(std::forward<Ts>(Args)...);
300 return Ret;
301 }
302
303 template <typename... Ts>
304 std::pair<iterator, bool> emplace_or_assign(KeyT &&Key, Ts &&...Args) {
305 auto Ret = try_emplace(std::move(Key), std::forward<Ts>(Args)...);
306 if (!Ret.second)
307 Ret.first->second = ValueT(std::forward<Ts>(Args)...);
308 return Ret;
309 }
310
  /// Remove the entry for \p Val if present. Returns true if an entry was
  /// erased, false if the key was not in the map.
  bool erase(const KeyT &Val) {
    BucketT *TheBucket = doFind(Val);
    if (!TheBucket)
      return false; // not in map.

    // Destroy the value and turn the bucket into a tombstone so that probe
    // chains passing through this slot remain intact.
    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
    return true;
  }
323 BucketT *TheBucket = &*I;
324 TheBucket->getSecond().~ValueT();
325 TheBucket->getFirst() = getTombstoneKey();
326 decrementNumEntries();
327 incrementNumTombstones();
328 }
329
330 ValueT &operator[](const KeyT &Key) {
331 return lookupOrInsertIntoBucket(Key).first->second;
332 }
333
334 ValueT &operator[](KeyT &&Key) {
335 return lookupOrInsertIntoBucket(std::move(Key)).first->second;
336 }
337
338 /// isPointerIntoBucketsArray - Return true if the specified pointer points
339 /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
340 /// value in the DenseMap).
341 [[nodiscard]] bool isPointerIntoBucketsArray(const void *Ptr) const {
342 return Ptr >= getBuckets() && Ptr < getBucketsEnd();
343 }
344
345 /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
346 /// array. In conjunction with the previous method, this can be used to
347 /// determine whether an insertion caused the DenseMap to reallocate.
348 [[nodiscard]] const void *getPointerIntoBucketsArray() const {
349 return getBuckets();
350 }
351
352protected:
353 DenseMapBase() = default;
354
  /// Run destructors on every live key/value pair and on every bucket's key.
  /// Does not free or reset the bucket array itself.
  void destroyAll() {
    if (getNumBuckets() == 0) // Nothing to do.
      return;

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    for (BucketT &B : buckets()) {
      // Only live buckets hold a constructed ValueT; every bucket (including
      // empty and tombstone markers) holds a constructed KeyT.
      if (!KeyInfoT::isEqual(B.getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(B.getFirst(), TombstoneKey))
        B.getSecond().~ValueT();
      B.getFirst().~KeyT();
    }
  }
367
  /// Mark every bucket as empty and zero the entry/tombstone counters.
  /// Assumes the bucket key slots are raw (uninitialized) storage.
  void initEmpty() {
    setNumEntries(0);
    setNumTombstones(0);

    // Probing computes "hash & (NumBuckets - 1)", which is only a valid
    // modulus for power-of-two table sizes.
    assert((getNumBuckets() & (getNumBuckets() - 1)) == 0 &&
           "# initial buckets must be a power of two!");
    const KeyT EmptyKey = getEmptyKey();
    for (BucketT &B : buckets())
      ::new (&B.getFirst()) KeyT(EmptyKey);
  }
378
  /// Returns the number of buckets to allocate to ensure that the DenseMap can
  /// accommodate \p NumEntries without need to grow().
  unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
    // Ensure that "NumEntries * 4 < NumBuckets * 3", i.e. the load factor
    // stays below 3/4 (the grow threshold in findBucketForInsertion).
    if (NumEntries == 0)
      return 0;
    // +1 is required because of the strict inequality.
    // For example, if NumEntries is 48, we need to return 128.
    return NextPowerOf2(NumEntries * 4 / 3 + 1);
  }
389
391 initEmpty();
392
393 // Insert all the old elements.
394 const KeyT EmptyKey = getEmptyKey();
395 const KeyT TombstoneKey = getTombstoneKey();
396 for (BucketT &B : OldBuckets) {
397 if (!KeyInfoT::isEqual(B.getFirst(), EmptyKey) &&
398 !KeyInfoT::isEqual(B.getFirst(), TombstoneKey)) {
399 // Insert the key/value into the new table.
400 BucketT *DestBucket;
401 bool FoundVal = LookupBucketFor(B.getFirst(), DestBucket);
402 (void)FoundVal; // silence warning.
403 assert(!FoundVal && "Key already in new map?");
404 DestBucket->getFirst() = std::move(B.getFirst());
405 ::new (&DestBucket->getSecond()) ValueT(std::move(B.getSecond()));
406 incrementNumEntries();
407
408 // Free the value.
409 B.getSecond().~ValueT();
410 }
411 B.getFirst().~KeyT();
412 }
413 }
414
415 template <typename OtherBaseT>
418 assert(&other != this);
419 assert(getNumBuckets() == other.getNumBuckets());
420
421 setNumEntries(other.getNumEntries());
422 setNumTombstones(other.getNumTombstones());
423
424 BucketT *Buckets = getBuckets();
425 const BucketT *OtherBuckets = other.getBuckets();
426 const size_t NumBuckets = getNumBuckets();
427 if constexpr (std::is_trivially_copyable_v<KeyT> &&
428 std::is_trivially_copyable_v<ValueT>) {
429 memcpy(reinterpret_cast<void *>(Buckets), OtherBuckets,
430 NumBuckets * sizeof(BucketT));
431 } else {
432 const KeyT EmptyKey = getEmptyKey();
433 const KeyT TombstoneKey = getTombstoneKey();
434 for (size_t I = 0; I < NumBuckets; ++I) {
435 ::new (&Buckets[I].getFirst()) KeyT(OtherBuckets[I].getFirst());
436 if (!KeyInfoT::isEqual(Buckets[I].getFirst(), EmptyKey) &&
437 !KeyInfoT::isEqual(Buckets[I].getFirst(), TombstoneKey))
438 ::new (&Buckets[I].getSecond()) ValueT(OtherBuckets[I].getSecond());
439 }
440 }
441 }
442
443 static unsigned getHashValue(const KeyT &Val) {
444 return KeyInfoT::getHashValue(Val);
445 }
446
447 template <typename LookupKeyT>
448 static unsigned getHashValue(const LookupKeyT &Val) {
449 return KeyInfoT::getHashValue(Val);
450 }
451
452 static const KeyT getEmptyKey() {
453 static_assert(std::is_base_of_v<DenseMapBase, DerivedT>,
454 "Must pass the derived type to this template!");
455 return KeyInfoT::getEmptyKey();
456 }
457
458 static const KeyT getTombstoneKey() { return KeyInfoT::getTombstoneKey(); }
459
460private:
461 DerivedT &derived() { return *static_cast<DerivedT *>(this); }
462 const DerivedT &derived() const {
463 return *static_cast<const DerivedT *>(this);
464 }
465
  /// Find the bucket for \p Key, inserting a (Key, ValueT(Args...)) entry if
  /// the key is absent. Returns the bucket and whether an insertion happened.
  template <typename KeyArgT, typename... Ts>
  std::pair<BucketT *, bool> lookupOrInsertIntoBucket(KeyArgT &&Key,
                                                      Ts &&...Args) {
    BucketT *TheBucket = nullptr;
    if (LookupBucketFor(Key, TheBucket))
      return {TheBucket, false}; // Already in the map.

    // Otherwise, insert the new element. findBucketForInsertion may grow the
    // table, so TheBucket can change here.
    TheBucket = findBucketForInsertion(Key, TheBucket);
    // The bucket's key slot holds a constructed empty/tombstone key, so it is
    // assigned over; the value slot is raw storage and is placement-new'd.
    TheBucket->getFirst() = std::forward<KeyArgT>(Key);
    ::new (&TheBucket->getSecond()) ValueT(std::forward<Ts>(Args)...);
    return {TheBucket, true};
  }
479
  /// Shared implementation behind insert()/try_emplace(): performs the bucket
  /// insertion and wraps the resulting bucket in an iterator.
  template <typename KeyArgT, typename... Ts>
  std::pair<iterator, bool> try_emplace_impl(KeyArgT &&Key, Ts &&...Args) {
    auto [Bucket, Inserted] = lookupOrInsertIntoBucket(
        std::forward<KeyArgT>(Key), std::forward<Ts>(Args)...);
    return {makeIterator(Bucket), Inserted};
  }
486
487 iterator makeIterator(BucketT *TheBucket) {
488 return iterator::makeIterator(TheBucket, buckets(), *this);
489 }
490
491 const_iterator makeConstIterator(const BucketT *TheBucket) const {
492 return const_iterator::makeIterator(TheBucket, buckets(), *this);
493 }
494
495 unsigned getNumEntries() const { return derived().getNumEntries(); }
496
497 void setNumEntries(unsigned Num) { derived().setNumEntries(Num); }
498
499 void incrementNumEntries() { setNumEntries(getNumEntries() + 1); }
500
501 void decrementNumEntries() { setNumEntries(getNumEntries() - 1); }
502
503 unsigned getNumTombstones() const { return derived().getNumTombstones(); }
504
505 void setNumTombstones(unsigned Num) { derived().setNumTombstones(Num); }
506
507 void incrementNumTombstones() { setNumTombstones(getNumTombstones() + 1); }
508
509 void decrementNumTombstones() { setNumTombstones(getNumTombstones() - 1); }
510
511 const BucketT *getBuckets() const { return derived().getBuckets(); }
512
513 BucketT *getBuckets() { return derived().getBuckets(); }
514
515 unsigned getNumBuckets() const { return derived().getNumBuckets(); }
516
517 BucketT *getBucketsEnd() { return getBuckets() + getNumBuckets(); }
518
519 const BucketT *getBucketsEnd() const {
520 return getBuckets() + getNumBuckets();
521 }
522
523 iterator_range<BucketT *> buckets() {
524 return llvm::make_range(getBuckets(), getBucketsEnd());
525 }
526
527 iterator_range<const BucketT *> buckets() const {
528 return llvm::make_range(getBuckets(), getBucketsEnd());
529 }
530
531 void grow(unsigned AtLeast) { derived().grow(AtLeast); }
532
533 void shrink_and_clear() { derived().shrink_and_clear(); }
534
535 template <typename LookupKeyT>
536 BucketT *findBucketForInsertion(const LookupKeyT &Lookup,
537 BucketT *TheBucket) {
539
540 // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
541 // the buckets are empty (meaning that many are filled with tombstones),
542 // grow the table.
543 //
544 // The later case is tricky. For example, if we had one empty bucket with
545 // tons of tombstones, failing lookups (e.g. for insertion) would have to
546 // probe almost the entire table until it found the empty bucket. If the
547 // table completely filled with tombstones, no lookup would ever succeed,
548 // causing infinite loops in lookup.
549 unsigned NewNumEntries = getNumEntries() + 1;
550 unsigned NumBuckets = getNumBuckets();
551 if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
552 this->grow(NumBuckets * 2);
553 LookupBucketFor(Lookup, TheBucket);
554 } else if (LLVM_UNLIKELY(NumBuckets -
555 (NewNumEntries + getNumTombstones()) <=
556 NumBuckets / 8)) {
557 this->grow(NumBuckets);
558 LookupBucketFor(Lookup, TheBucket);
559 }
560 assert(TheBucket);
561
562 // Only update the state after we've grown our bucket space appropriately
563 // so that when growing buckets we have self-consistent entry count.
564 incrementNumEntries();
565
566 // If we are writing over a tombstone, remember this.
567 const KeyT EmptyKey = getEmptyKey();
568 if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
569 decrementNumTombstones();
570
571 return TheBucket;
572 }
573
  /// Core probe loop: return the bucket holding \p Val, or null if the key is
  /// not in the map.
  template <typename LookupKeyT>
  const BucketT *doFind(const LookupKeyT &Val) const {
    const BucketT *BucketsPtr = getBuckets();
    const unsigned NumBuckets = getNumBuckets();
    if (NumBuckets == 0)
      return nullptr;

    const KeyT EmptyKey = getEmptyKey();
    // NumBuckets is a power of two, so masking is equivalent to modulo.
    unsigned BucketNo = getHashValue(Val) & (NumBuckets - 1);
    unsigned ProbeAmt = 1;
    while (true) {
      const BucketT *Bucket = BucketsPtr + BucketNo;
      if (LLVM_LIKELY(KeyInfoT::isEqual(Val, Bucket->getFirst())))
        return Bucket;
      // An empty bucket terminates the probe chain: the key cannot appear
      // further along it.
      if (LLVM_LIKELY(KeyInfoT::isEqual(Bucket->getFirst(), EmptyKey)))
        return nullptr;

      // Otherwise, it's a hash collision or a tombstone, continue quadratic
      // probing.
      BucketNo += ProbeAmt++;
      BucketNo &= NumBuckets - 1;
    }
  }
597
598 template <typename LookupKeyT> BucketT *doFind(const LookupKeyT &Val) {
599 return const_cast<BucketT *>(
600 static_cast<const DenseMapBase *>(this)->doFind(Val));
601 }
602
603 /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
604 /// FoundBucket. If the bucket contains the key and a value, this returns
605 /// true, otherwise it returns a bucket with an empty marker or tombstone and
606 /// returns false.
  /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
  /// FoundBucket. If the bucket contains the key and a value, this returns
  /// true, otherwise it returns a bucket with an empty marker or tombstone and
  /// returns false.
  template <typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
    BucketT *BucketsPtr = getBuckets();
    const unsigned NumBuckets = getNumBuckets();

    // Zero-capacity table (lazily allocated): report "not found" with no
    // insertable bucket; callers must grow before inserting.
    if (NumBuckets == 0) {
      FoundBucket = nullptr;
      return false;
    }

    // FoundTombstone - Keep track of whether we find a tombstone while probing.
    BucketT *FoundTombstone = nullptr;
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
           !KeyInfoT::isEqual(Val, TombstoneKey) &&
           "Empty/Tombstone value shouldn't be inserted into map!");

    // NumBuckets is a power of two, so masking is equivalent to modulo.
    unsigned BucketNo = getHashValue(Val) & (NumBuckets - 1);
    unsigned ProbeAmt = 1;
    while (true) {
      BucketT *ThisBucket = BucketsPtr + BucketNo;
      // Found Val's bucket? If so, return it.
      if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
        FoundBucket = ThisBucket;
        return true;
      }

      // If we found an empty bucket, the key doesn't exist in the set.
      // Insert it and return the default value.
      if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
        // If we've already seen a tombstone while probing, fill it in instead
        // of the empty bucket we eventually probed to.
        FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
        return false;
      }

      // If this is a tombstone, remember it. If Val ends up not in the map, we
      // prefer to return it than something that would require more probing.
      if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
          !FoundTombstone)
        FoundTombstone = ThisBucket; // Remember the first tombstone found.

      // Otherwise, it's a hash collision or a tombstone, continue quadratic
      // probing.
      BucketNo += ProbeAmt++;
      BucketNo &= (NumBuckets - 1);
    }
  }
656
657public:
658 /// Return the approximate size (in bytes) of the actual map.
659 /// This is just the raw memory used by DenseMap.
660 /// If entries are pointers to objects, the size of the referenced objects
661 /// are not included.
662 [[nodiscard]] size_t getMemorySize() const {
663 return getNumBuckets() * sizeof(BucketT);
664 }
665};
666
667/// Equality comparison for DenseMap.
668///
669/// Iterates over elements of LHS confirming that each (key, value) pair in LHS
670/// is also in RHS, and that no additional pairs are in RHS.
671/// Equivalent to N calls to RHS.find and N value comparisons. Amortized
672/// complexity is linear, worst case is O(N^2) (if every hash collides).
673template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
674 typename BucketT>
675[[nodiscard]] bool
678 if (LHS.size() != RHS.size())
679 return false;
680
681 for (auto &KV : LHS) {
682 auto I = RHS.find(KV.first);
683 if (I == RHS.end() || I->second != KV.second)
684 return false;
685 }
686
687 return true;
688}
689
690/// Inequality comparison for DenseMap.
691///
692/// Equivalent to !(LHS == RHS). See operator== for performance notes.
693template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
694 typename BucketT>
695[[nodiscard]] bool
700
701template <typename KeyT, typename ValueT,
702 typename KeyInfoT = DenseMapInfo<KeyT>,
704class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
705 KeyT, ValueT, KeyInfoT, BucketT> {
706 friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
707
708 // Lift some types from the dependent base class into this class for
709 // simplicity of referring to them.
711
712 BucketT *Buckets;
713 unsigned NumEntries;
714 unsigned NumTombstones;
715 unsigned NumBuckets;
716
717public:
718 /// Create a DenseMap with an optional \p NumElementsToReserve to guarantee
719 /// that this number of elements can be inserted in the map without grow().
720 explicit DenseMap(unsigned NumElementsToReserve = 0) {
721 init(NumElementsToReserve);
722 }
723
724 DenseMap(const DenseMap &other) : BaseT() {
725 init(0);
726 copyFrom(other);
727 }
728
729 DenseMap(DenseMap &&other) : BaseT() {
730 init(0);
731 swap(other);
732 }
733
734 template <typename InputIt> DenseMap(const InputIt &I, const InputIt &E) {
735 init(std::distance(I, E));
736 this->insert(I, E);
737 }
738
739 template <typename RangeT>
742
743 DenseMap(std::initializer_list<typename BaseT::value_type> Vals)
744 : DenseMap(Vals.begin(), Vals.end()) {}
745
747 this->destroyAll();
748 deallocateBuckets();
749 }
750
752 this->incrementEpoch();
753 RHS.incrementEpoch();
754 std::swap(Buckets, RHS.Buckets);
755 std::swap(NumEntries, RHS.NumEntries);
756 std::swap(NumTombstones, RHS.NumTombstones);
757 std::swap(NumBuckets, RHS.NumBuckets);
758 }
759
760 DenseMap &operator=(const DenseMap &other) {
761 if (&other != this)
762 copyFrom(other);
763 return *this;
764 }
765
767 this->destroyAll();
768 deallocateBuckets();
769 init(0);
770 swap(other);
771 return *this;
772 }
773
  /// Replace this map's contents with a copy of \p other's.
  void copyFrom(const DenseMap &other) {
    // Tear down current contents, then size our table to exactly match
    // other's so the base class's bucket-by-bucket copy lines up.
    this->destroyAll();
    deallocateBuckets();
    if (allocateBuckets(other.NumBuckets)) {
      this->BaseT::copyFrom(other);
    } else {
      // Zero-capacity source: nothing to copy, just reset the counters.
      NumEntries = 0;
      NumTombstones = 0;
    }
  }
784
  /// Reallocate the bucket array to hold at least \p AtLeast buckets and
  /// rehash all existing entries into it.
  void grow(unsigned AtLeast) {
    unsigned OldNumBuckets = NumBuckets;
    BucketT *OldBuckets = Buckets;

    // Round the request up to a power of two, with a floor of 64 buckets.
    allocateBuckets(std::max<unsigned>(
        64, static_cast<unsigned>(NextPowerOf2(AtLeast - 1))));
    assert(Buckets);
    if (!OldBuckets) {
      // First allocation: no existing entries to rehash.
      this->BaseT::initEmpty();
      return;
    }

    this->moveFromOldBuckets(
        llvm::make_range(OldBuckets, OldBuckets + OldNumBuckets));

    // Free the old table.
    deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
                      alignof(BucketT));
  }
804
806 unsigned OldNumBuckets = NumBuckets;
807 unsigned OldNumEntries = NumEntries;
808 this->destroyAll();
809
810 // Reduce the number of buckets.
811 unsigned NewNumBuckets = 0;
812 if (OldNumEntries)
813 NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
814 if (NewNumBuckets == NumBuckets) {
815 this->BaseT::initEmpty();
816 return;
817 }
818
819 deallocate_buffer(Buckets, sizeof(BucketT) * OldNumBuckets,
820 alignof(BucketT));
821 init(NewNumBuckets);
822 }
823
824private:
825 unsigned getNumEntries() const { return NumEntries; }
826
827 void setNumEntries(unsigned Num) { NumEntries = Num; }
828
829 unsigned getNumTombstones() const { return NumTombstones; }
830
831 void setNumTombstones(unsigned Num) { NumTombstones = Num; }
832
833 BucketT *getBuckets() const { return Buckets; }
834
835 unsigned getNumBuckets() const { return NumBuckets; }
836
837 void deallocateBuckets() {
838 deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
839 }
840
  /// Allocate raw storage for \p Num buckets. Returns false (and leaves
  /// Buckets null) when Num is zero, so callers can skip initialization.
  bool allocateBuckets(unsigned Num) {
    NumBuckets = Num;
    if (NumBuckets == 0) {
      Buckets = nullptr;
      return false;
    }

    // Storage is uninitialized: bucket keys are constructed later by
    // initEmpty() or by the copy/move paths.
    Buckets = static_cast<BucketT *>(
        allocate_buffer(sizeof(BucketT) * NumBuckets, alignof(BucketT)));
    return true;
  }
852
  /// Set up an empty table sized so \p InitNumEntries insertions will not
  /// trigger a grow().
  void init(unsigned InitNumEntries) {
    auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
    if (allocateBuckets(InitBuckets)) {
      this->BaseT::initEmpty();
    } else {
      // No buckets were allocated; just zero the bookkeeping counters.
      NumEntries = 0;
      NumTombstones = 0;
    }
  }
862};
863
864template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
865 typename KeyInfoT = DenseMapInfo<KeyT>,
868 : public DenseMapBase<
869 SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
870 ValueT, KeyInfoT, BucketT> {
871 friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
872
873 // Lift some types from the dependent base class into this class for
874 // simplicity of referring to them.
876
877 static_assert(isPowerOf2_64(InlineBuckets),
878 "InlineBuckets must be a power of 2.");
879
880 unsigned Small : 1;
881 unsigned NumEntries : 31;
882 unsigned NumTombstones;
883
884 struct LargeRep {
885 BucketT *Buckets;
886 unsigned NumBuckets;
888 return llvm::make_range(Buckets, Buckets + NumBuckets);
889 }
890 };
891
892 /// A "union" of an inline bucket array and the struct representing
893 /// a large bucket. This union will be discriminated by the 'Small' bit.
894 AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;
895
896public:
897 explicit SmallDenseMap(unsigned NumElementsToReserve = 0) {
898 init(NumElementsToReserve);
899 }
900
901 SmallDenseMap(const SmallDenseMap &other) : BaseT() {
902 init(0);
903 copyFrom(other);
904 }
905
906 SmallDenseMap(SmallDenseMap &&other) : BaseT() {
907 init(0);
908 swap(other);
909 }
910
911 template <typename InputIt>
912 SmallDenseMap(const InputIt &I, const InputIt &E) {
913 init(std::distance(I, E));
914 this->insert(I, E);
915 }
916
917 template <typename RangeT>
920
921 SmallDenseMap(std::initializer_list<typename BaseT::value_type> Vals)
922 : SmallDenseMap(Vals.begin(), Vals.end()) {}
923
925 this->destroyAll();
926 deallocateBuckets();
927 }
928
930 unsigned TmpNumEntries = RHS.NumEntries;
931 RHS.NumEntries = NumEntries;
932 NumEntries = TmpNumEntries;
933 std::swap(NumTombstones, RHS.NumTombstones);
934
935 const KeyT EmptyKey = this->getEmptyKey();
936 const KeyT TombstoneKey = this->getTombstoneKey();
937 if (Small && RHS.Small) {
938 // If we're swapping inline bucket arrays, we have to cope with some of
939 // the tricky bits of DenseMap's storage system: the buckets are not
940 // fully initialized. Thus we swap every key, but we may have
941 // a one-directional move of the value.
942 for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
943 BucketT *LHSB = &getInlineBuckets()[i],
944 *RHSB = &RHS.getInlineBuckets()[i];
945 bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
946 !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
947 bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
948 !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
949 if (hasLHSValue && hasRHSValue) {
950 // Swap together if we can...
951 std::swap(*LHSB, *RHSB);
952 continue;
953 }
954 // Swap separately and handle any asymmetry.
955 std::swap(LHSB->getFirst(), RHSB->getFirst());
956 if (hasLHSValue) {
957 ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
958 LHSB->getSecond().~ValueT();
959 } else if (hasRHSValue) {
960 ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
961 RHSB->getSecond().~ValueT();
962 }
963 }
964 return;
965 }
966 if (!Small && !RHS.Small) {
967 std::swap(*getLargeRep(), *RHS.getLargeRep());
968 return;
969 }
970
971 SmallDenseMap &SmallSide = Small ? *this : RHS;
972 SmallDenseMap &LargeSide = Small ? RHS : *this;
973
974 // First stash the large side's rep and move the small side across.
975 LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
976 LargeSide.getLargeRep()->~LargeRep();
977 LargeSide.Small = true;
978 // This is similar to the standard move-from-old-buckets, but the bucket
979 // count hasn't actually rotated in this case. So we have to carefully
980 // move construct the keys and values into their new locations, but there
981 // is no need to re-hash things.
982 for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
983 BucketT *NewB = &LargeSide.getInlineBuckets()[i],
984 *OldB = &SmallSide.getInlineBuckets()[i];
985 ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
986 OldB->getFirst().~KeyT();
987 if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
988 !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
989 ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
990 OldB->getSecond().~ValueT();
991 }
992 }
993
994 // The hard part of moving the small buckets across is done, just move
995 // the TmpRep into its new home.
996 SmallSide.Small = false;
997 new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
998 }
999
    // Guard against self-assignment: copyFrom() starts by destroying our
    // current contents, which would also destroy `other` if they alias.
    if (&other != this)
      copyFrom(other);
    return *this;
  }
1005
    // Reset *this to a minimal empty state, then swap with `other`.  The
    // moved-from map is left valid, holding our former (empty) state.
    this->destroyAll();
    deallocateBuckets();
    init(0);
    swap(other);
    return *this;
  }
1013
  /// Replace this map's contents with a copy of \p other.
  ///
  /// Existing elements are destroyed and the bucket storage is re-allocated
  /// to match \p other's bucket count (inline storage is used when it fits);
  /// the base class then copies the buckets themselves.
  void copyFrom(const SmallDenseMap &other) {
    this->destroyAll();
    deallocateBuckets();
    allocateBuckets(other.getNumBuckets());
    this->BaseT::copyFrom(other);
  }
1020
1021 void init(unsigned InitNumEntries) {
1022 auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
1023 allocateBuckets(InitBuckets);
1024 this->BaseT::initEmpty();
1025 }
1026
  /// Grow the table so it can hold at least \p AtLeast buckets, re-inserting
  /// every live element.  Also used with the current size to rehash in place
  /// and purge tombstones.
  void grow(unsigned AtLeast) {
    // Heap-allocated tables are always a power of two and at least 64
    // buckets.
    if (AtLeast > InlineBuckets)
      AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast - 1));

    if (Small) {
      // First move the inline buckets into a temporary storage.
      // NOTE(review): the declaration of TmpStorage (presumably an aligned
      // char buffer sized for InlineBuckets buckets) appears to be missing
      // from this extract -- confirm against the upstream header.
      BucketT *TmpBegin = reinterpret_cast<BucketT *>(&TmpStorage);
      BucketT *TmpEnd = TmpBegin;

      // Loop over the buckets, moving non-empty, non-tombstones into the
      // temporary storage. Have the loop move the TmpEnd forward as it goes.
      const KeyT EmptyKey = this->getEmptyKey();
      const KeyT TombstoneKey = this->getTombstoneKey();
      for (BucketT &B : inlineBuckets()) {
        if (!KeyInfoT::isEqual(B.getFirst(), EmptyKey) &&
            !KeyInfoT::isEqual(B.getFirst(), TombstoneKey)) {
          assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
                 "Too many inline buckets!");
          ::new (&TmpEnd->getFirst()) KeyT(std::move(B.getFirst()));
          ::new (&TmpEnd->getSecond()) ValueT(std::move(B.getSecond()));
          ++TmpEnd;
          // Only occupied buckets have a live value to destroy; every
          // bucket's key is destroyed below regardless.
          B.getSecond().~ValueT();
        }
        B.getFirst().~KeyT();
      }

      // AtLeast == InlineBuckets can happen if there are many tombstones,
      // and grow() is used to remove them. Usually we always switch to the
      // large rep here.
      allocateBuckets(AtLeast);
      this->moveFromOldBuckets(llvm::make_range(TmpBegin, TmpEnd));
      return;
    }

    // Large representation: stash the old descriptor, allocate the new
    // table, and re-insert everything from the old one.
    LargeRep OldRep = std::move(*getLargeRep());
    getLargeRep()->~LargeRep();
    allocateBuckets(AtLeast);

    this->moveFromOldBuckets(OldRep.buckets());

    // Free the old table.
    deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
                      alignof(BucketT));
  }
1072
    // Remember how many live entries we had, then destroy them all.
    unsigned OldSize = this->size();
    this->destroyAll();

    // Reduce the number of buckets.
    unsigned NewNumBuckets = 0;
    if (OldSize) {
      // Smallest power of two that keeps the load factor at or below 1/2;
      // heap-allocated tables are never smaller than 64 buckets.
      NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
      if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
        NewNumBuckets = 64;
    }
    // If the representation would not change (still inline, or the same
    // heap size), just reset the existing buckets to empty in place.
    if ((Small && NewNumBuckets <= InlineBuckets) ||
        (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
      this->BaseT::initEmpty();
      return;
    }

    deallocateBuckets();
    init(NewNumBuckets);
  }
1093
1094private:
1095 unsigned getNumEntries() const { return NumEntries; }
1096
  // Update the live-entry count.
  void setNumEntries(unsigned Num) {
    // NumEntries is hardcoded to be 31 bits wide.
    assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
    NumEntries = Num;
  }
1102
1103 unsigned getNumTombstones() const { return NumTombstones; }
1104
1105 void setNumTombstones(unsigned Num) { NumTombstones = Num; }
1106
  // Inline (small) representation: the bucket array lives directly inside
  // 'storage'.  Only valid while Small is set.
  const BucketT *getInlineBuckets() const {
    assert(Small);
    // Note that this cast does not violate aliasing rules as we assert that
    // the memory's dynamic type is the small, inline bucket buffer, and the
    // 'storage' is a POD containing a char buffer.
    return reinterpret_cast<const BucketT *>(&storage);
  }
1114
  // Non-const access: reuse the const overload and cast the constness off.
  BucketT *getInlineBuckets() {
    return const_cast<BucketT *>(
        const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
  }
1119
  // Heap-allocated (large) representation: 'storage' holds a LargeRep that
  // describes the out-of-line bucket array.  Only valid while !Small.
  const LargeRep *getLargeRep() const {
    assert(!Small);
    // Note, same rule about aliasing as with getInlineBuckets.
    return reinterpret_cast<const LargeRep *>(&storage);
  }
1125
  // Non-const access: reuse the const overload and cast the constness off.
  LargeRep *getLargeRep() {
    return const_cast<LargeRep *>(
        const_cast<const SmallDenseMap *>(this)->getLargeRep());
  }
1130
1131 const BucketT *getBuckets() const {
1132 return Small ? getInlineBuckets() : getLargeRep()->Buckets;
1133 }
1134
  // Non-const access: reuse the const overload and cast the constness off.
  BucketT *getBuckets() {
    return const_cast<BucketT *>(
        const_cast<const SmallDenseMap *>(this)->getBuckets());
  }
1139
1140 unsigned getNumBuckets() const {
1141 return Small ? InlineBuckets : getLargeRep()->NumBuckets;
1142 }
1143
1144 iterator_range<BucketT *> inlineBuckets() {
1145 BucketT *Begin = getInlineBuckets();
1146 return llvm::make_range(Begin, Begin + InlineBuckets);
1147 }
1148
1149 void deallocateBuckets() {
1150 if (Small)
1151 return;
1152
1153 deallocate_buffer(getLargeRep()->Buckets,
1154 sizeof(BucketT) * getLargeRep()->NumBuckets,
1155 alignof(BucketT));
1156 getLargeRep()->~LargeRep();
1157 }
1158
1159 void allocateBuckets(unsigned Num) {
1160 if (Num <= InlineBuckets) {
1161 Small = true;
1162 } else {
1163 Small = false;
1164 BucketT *NewBuckets = static_cast<BucketT *>(
1165 allocate_buffer(sizeof(BucketT) * Num, alignof(BucketT)));
1166 new (getLargeRep()) LargeRep{NewBuckets, Num};
1167 }
1168 }
1169};
1170
1171template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
1172 bool IsConst>
1173class DenseMapIterator : DebugEpochBase::HandleBase {
1174 friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
1175 friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;
1176
1177public:
1179 using value_type = std::conditional_t<IsConst, const Bucket, Bucket>;
1182 using iterator_category = std::forward_iterator_tag;
1183
1184private:
1185 using BucketItTy =
1186 std::conditional_t<shouldReverseIterate<KeyT>(),
1187 std::reverse_iterator<pointer>, pointer>;
1188
1189 BucketItTy Ptr = {};
1190 BucketItTy End = {};
1191
  // Build an iterator over [Pos, E), registered with \p Epoch so debug
  // builds can detect use after the map is mutated.
  DenseMapIterator(BucketItTy Pos, BucketItTy E, const DebugEpochBase &Epoch)
      : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
    assert(isHandleInSync() && "invalid construction!");
  }
1196
1197public:
1198 DenseMapIterator() = default;
1199
1200 static DenseMapIterator makeBegin(iterator_range<pointer> Buckets,
1201 bool IsEmpty, const DebugEpochBase &Epoch) {
1202 // When the map is empty, avoid the overhead of advancing/retreating past
1203 // empty buckets.
1204 if (IsEmpty)
1205 return makeEnd(Buckets, Epoch);
1206 auto R = maybeReverse(Buckets);
1207 DenseMapIterator Iter(R.begin(), R.end(), Epoch);
1208 Iter.AdvancePastEmptyBuckets();
1209 return Iter;
1210 }
1211
1212 static DenseMapIterator makeEnd(iterator_range<pointer> Buckets,
1213 const DebugEpochBase &Epoch) {
1214 auto R = maybeReverse(Buckets);
1215 return DenseMapIterator(R.end(), R.end(), Epoch);
1216 }
1217
  // Wrap a raw bucket pointer \p P in an iterator.  Under reverse iteration
  // the wrapped position is offset by one, since reverse_iterator(P + 1)
  // dereferences back to the bucket at P.
  // NOTE(review): an `iterator_range<pointer> Buckets` parameter appears to
  // be missing from this extract (the body uses `Buckets`) -- confirm
  // against the upstream header.
  static DenseMapIterator makeIterator(pointer P,
                                       const DebugEpochBase &Epoch) {
    auto R = maybeReverse(Buckets);
    constexpr int Offset = shouldReverseIterate<KeyT>() ? 1 : 0;
    return DenseMapIterator(BucketItTy(P + Offset), R.end(), Epoch);
  }
1225
  // Converting ctor from non-const iterators to const iterators. SFINAE'd out
  // for const iterator destinations so it doesn't end up as a user defined copy
  // constructor.
  // NOTE(review): the `DenseMapIterator(` declarator line appears to be
  // missing from this extract -- confirm against the upstream header.
  template <bool IsConstSrc,
            typename = std::enable_if_t<!IsConstSrc && IsConst>>
      const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I)
      : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}
1234
  /// Dereference to the current bucket.  Debug builds verify the iterator
  /// has not been invalidated and is not the end() sentinel.
  [[nodiscard]] reference operator*() const {
    assert(isHandleInSync() && "invalid iterator access!");
    assert(Ptr != End && "dereferencing end() iterator");
    return *Ptr;
  }
1240 [[nodiscard]] pointer operator->() const { return &operator*(); }
1241
  [[nodiscard]] friend bool operator==(const DenseMapIterator &LHS,
                                       const DenseMapIterator &RHS) {
    // Default-constructed iterators carry no epoch; only validate handles
    // that are actually attached to a map.
    assert((!LHS.getEpochAddress() || LHS.isHandleInSync()) &&
           "handle not in sync!");
    assert((!RHS.getEpochAddress() || RHS.isHandleInSync()) &&
           "handle not in sync!");
    // Iterators bound to different epochs (different maps) may not be
    // compared.
    assert(LHS.getEpochAddress() == RHS.getEpochAddress() &&
           "comparing incomparable iterators!");
    return LHS.Ptr == RHS.Ptr;
  }
1252
  // Defined in terms of operator== so the debug-mode checks live in one
  // place.
  [[nodiscard]] friend bool operator!=(const DenseMapIterator &LHS,
                                       const DenseMapIterator &RHS) {
    return !(LHS == RHS);
  }
1257
  inline DenseMapIterator &operator++() { // Preincrement
    assert(isHandleInSync() && "invalid iterator access!");
    assert(Ptr != End && "incrementing end() iterator");
    // Step off the current bucket, then skip empty/tombstone buckets so the
    // iterator always rests on a live entry (or End).
    ++Ptr;
    AdvancePastEmptyBuckets();
    return *this;
  }
1265 DenseMapIterator operator++(int) { // Postincrement
1266 assert(isHandleInSync() && "invalid iterator access!");
1267 DenseMapIterator tmp = *this;
1268 ++*this;
1269 return tmp;
1270 }
1271
1272private:
1273 void AdvancePastEmptyBuckets() {
1274 assert(Ptr <= End);
1275 const KeyT Empty = KeyInfoT::getEmptyKey();
1276 const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1277
1278 while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
1279 KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
1280 ++Ptr;
1281 }
1282
  // Present the bucket range in iteration order: reversed when
  // shouldReverseIterate<KeyT>() is configured on (presumably to expose
  // accidental iteration-order dependence -- see that function), unchanged
  // otherwise.
  static auto maybeReverse(iterator_range<pointer> Range) {
    if constexpr (shouldReverseIterate<KeyT>())
      return reverse(Range);
    else
      return Range;
  }
1289};
1290
// Report the approximate memory footprint of a DenseMap, in bytes, by
// delegating to its getMemorySize().
// NOTE(review): the declarator line (naming capacity_in_bytes and its
// DenseMap parameter `X`) appears to be missing from this extract --
// confirm against the upstream header.
template <typename KeyT, typename ValueT, typename KeyInfoT>
[[nodiscard]] inline size_t
  return X.getMemorySize();
}
1296
1297} // end namespace llvm
1298
1299#endif // LLVM_ADT_DENSEMAP_H
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define LLVM_UNLIKELY(EXPR)
Definition Compiler.h:336
#define LLVM_LIKELY(EXPR)
Definition Compiler.h:335
This file defines DenseMapInfo traits for DenseMap.
This file defines the DebugEpochBase and DebugEpochBase::HandleBase classes.
#define I(x, y, z)
Definition MD5.cpp:58
This file defines counterparts of C library allocation functions defined in the namespace 'std'.
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
#define P(N)
This file contains some templates that are useful if you are working with the STL at all.
This file contains library features backported from future STL versions.
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
LocallyHashedType DenseMapInfo< LocallyHashedType >::Empty
LocallyHashedType DenseMapInfo< LocallyHashedType >::Tombstone
static int Lookup(ArrayRef< TableEntry > Table, unsigned Opcode)
Value * RHS
Value * LHS
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition DenseMap.h:194
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:167
static unsigned getHashValue(const KeyT &Val)
Definition DenseMap.h:443
unsigned size_type
Definition DenseMap.h:69
static const KeyT getEmptyKey()
Definition DenseMap.h:452
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Definition DenseMap.h:237
std::pair< iterator, bool > insert(std::pair< KeyT, ValueT > &&KV)
Definition DenseMap.h:229
bool erase(const KeyT &Val)
Definition DenseMap.h:311
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
Definition DenseMap.h:74
std::pair< iterator, bool > insert_as(std::pair< KeyT, ValueT > &&KV, const LookupKeyT &Val)
Alternate version of insert() which allows a different, and possibly less expensive,...
Definition DenseMap.h:255
DenseMapBase()=default
const_iterator find_as(const LookupKeyT &Val) const
Definition DenseMap.h:186
const_iterator end() const
Definition DenseMap.h:87
void moveFromOldBuckets(iterator_range< BucketT * > OldBuckets)
Definition DenseMap.h:390
iterator find_as(const LookupKeyT &Val)
Alternate version of find() which allows a different, and possibly less expensive,...
Definition DenseMap.h:180
unsigned size() const
Definition DenseMap.h:110
const_iterator find(const_arg_type_t< KeyT > Val) const
Definition DenseMap.h:170
bool empty() const
Definition DenseMap.h:109
std::pair< iterator, bool > emplace_or_assign(const KeyT &Key, Ts &&...Args)
Definition DenseMap.h:296
void insert(InputIt I, InputIt E)
insert - Range insertion of pairs.
Definition DenseMap.h:269
iterator begin()
Definition DenseMap.h:78
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition DenseMap.h:163
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT, true > const_iterator
Definition DenseMap.h:75
iterator end()
Definition DenseMap.h:81
static const KeyT getTombstoneKey()
Definition DenseMap.h:458
const ValueT & at(const_arg_type_t< KeyT > Val) const
at - Return the entry for the specified key, or abort if no such entry exists.
Definition DenseMap.h:213
bool isPointerIntoBucketsArray(const void *Ptr) const
isPointerIntoBucketsArray - Return true if the specified pointer points somewhere into the DenseMap's...
Definition DenseMap.h:341
void copyFrom(const DenseMapBase< OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT > &other)
Definition DenseMap.h:416
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition DenseMap.h:158
std::pair< iterator, bool > try_emplace(const KeyT &Key, Ts &&...Args)
Definition DenseMap.h:245
const_iterator begin() const
Definition DenseMap.h:84
std::pair< iterator, bool > emplace_or_assign(KeyT &&Key, Ts &&...Args)
Definition DenseMap.h:304
void insert_range(Range &&R)
Inserts range of 'std::pair<KeyT, ValueT>' values into the map.
Definition DenseMap.h:275
const void * getPointerIntoBucketsArray() const
getPointerIntoBucketsArray() - Return an opaque pointer into the buckets array.
Definition DenseMap.h:348
std::pair< iterator, bool > insert_or_assign(KeyT &&Key, V &&Val)
Definition DenseMap.h:288
ValueT lookup_or(const_arg_type_t< KeyT > Val, U &&Default) const
Definition DenseMap.h:204
unsigned getMinBucketToReserveForEntries(unsigned NumEntries)
Returns the number of buckets to allocate to ensure that the DenseMap can accommodate NumEntries with...
Definition DenseMap.h:381
static unsigned getHashValue(const LookupKeyT &Val)
Definition DenseMap.h:448
ValueT & operator[](const KeyT &Key)
Definition DenseMap.h:330
BucketT value_type
Definition DenseMap.h:72
auto keys() const
Definition DenseMap.h:101
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition DenseMap.h:222
void erase(iterator I)
Definition DenseMap.h:322
std::pair< iterator, bool > insert_or_assign(const KeyT &Key, V &&Val)
Definition DenseMap.h:280
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
Definition DenseMap.h:114
ValueT & operator[](KeyT &&Key)
Definition DenseMap.h:334
auto values() const
Definition DenseMap.h:105
size_t getMemorySize() const
Return the approximate size (in bytes) of the actual map.
Definition DenseMap.h:662
std::conditional_t< IsConst, const BucketT, BucketT > value_type
Definition DenseMap.h:1179
static DenseMapIterator makeIterator(pointer P, iterator_range< pointer > Buckets, const DebugEpochBase &Epoch)
Definition DenseMap.h:1218
friend bool operator!=(const DenseMapIterator &LHS, const DenseMapIterator &RHS)
Definition DenseMap.h:1253
DenseMapIterator & operator++()
Definition DenseMap.h:1258
pointer operator->() const
Definition DenseMap.h:1240
reference operator*() const
Definition DenseMap.h:1235
static DenseMapIterator makeBegin(iterator_range< pointer > Buckets, bool IsEmpty, const DebugEpochBase &Epoch)
Definition DenseMap.h:1200
DenseMapIterator operator++(int)
Definition DenseMap.h:1265
DenseMapIterator(const DenseMapIterator< KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc > &I)
Definition DenseMap.h:1231
friend bool operator==(const DenseMapIterator &LHS, const DenseMapIterator &RHS)
Definition DenseMap.h:1242
static DenseMapIterator makeEnd(iterator_range< pointer > Buckets, const DebugEpochBase &Epoch)
Definition DenseMap.h:1212
DenseMap(std::initializer_list< typename BaseT::value_type > Vals)
Definition DenseMap.h:743
DenseMap(unsigned NumElementsToReserve=0)
Create a DenseMap with an optional NumElementsToReserve to guarantee that this number of elements can...
Definition DenseMap.h:720
void copyFrom(const DenseMap &other)
Definition DenseMap.h:774
void shrink_and_clear()
Definition DenseMap.h:805
DenseMap & operator=(DenseMap &&other)
Definition DenseMap.h:766
void grow(unsigned AtLeast)
Definition DenseMap.h:785
DenseMap(llvm::from_range_t, const RangeT &Range)
Definition DenseMap.h:740
DenseMap(const DenseMap &other)
Definition DenseMap.h:724
void swap(DenseMap &RHS)
Definition DenseMap.h:751
DenseMap(const InputIt &I, const InputIt &E)
Definition DenseMap.h:734
DenseMap(DenseMap &&other)
Definition DenseMap.h:729
DenseMap & operator=(const DenseMap &other)
Definition DenseMap.h:760
void grow(unsigned AtLeast)
Definition DenseMap.h:1027
SmallDenseMap(const InputIt &I, const InputIt &E)
Definition DenseMap.h:912
void swap(SmallDenseMap &RHS)
Definition DenseMap.h:929
SmallDenseMap & operator=(SmallDenseMap &&other)
Definition DenseMap.h:1006
SmallDenseMap & operator=(const SmallDenseMap &other)
Definition DenseMap.h:1000
SmallDenseMap(unsigned NumElementsToReserve=0)
Definition DenseMap.h:897
void init(unsigned InitNumEntries)
Definition DenseMap.h:1021
SmallDenseMap(std::initializer_list< typename BaseT::value_type > Vals)
Definition DenseMap.h:921
SmallDenseMap(SmallDenseMap &&other)
Definition DenseMap.h:906
SmallDenseMap(const SmallDenseMap &other)
Definition DenseMap.h:901
void copyFrom(const SmallDenseMap &other)
Definition DenseMap.h:1014
SmallDenseMap(llvm::from_range_t, const RangeT &Range)
Definition DenseMap.h:918
A range adaptor for a pair of iterators.
constexpr char IsConst[]
Key for Kernel::Arg::Metadata::mIsConst.
These are wrappers over isa* function that allow them to be used in generic algorithms such as llvm:a...
Definition ADL.h:123
This is an optimization pass for GlobalISel generic memory operations.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
Definition MathExtras.h:355
@ Offset
Definition DWP.cpp:477
constexpr auto adl_begin(RangeT &&range) -> decltype(adl_detail::begin_impl(std::forward< RangeT >(range)))
Returns the begin iterator to range using std::begin and function found through Argument-Dependent Lo...
Definition ADL.h:78
BitVector::size_type capacity_in_bytes(const BitVector &X)
Definition BitVector.h:847
bool operator!=(uint64_t V1, const APInt &V2)
Definition APInt.h:2113
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition MathExtras.h:293
constexpr auto adl_end(RangeT &&range) -> decltype(adl_detail::end_impl(std::forward< RangeT >(range)))
Returns the end iterator to range using std::end and functions found through Argument-Dependent Looku...
Definition ADL.h:86
bool operator==(const AddressRangeValuePair &LHS, const AddressRangeValuePair &RHS)
auto map_range(ContainerTy &&C, FuncTy F)
Definition STLExtras.h:366
LLVM_ABI LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_RETURNS_NOALIAS void * allocate_buffer(size_t Size, size_t Alignment)
Allocate a buffer of memory with the given size and alignment.
Definition MemAlloc.cpp:15
auto reverse(ContainerTy &&C)
Definition STLExtras.h:408
LLVM_ABI void deallocate_buffer(void *Ptr, size_t Size, size_t Alignment)
Deallocate a buffer of memory with the given size and alignment.
Definition MemAlloc.cpp:27
constexpr bool shouldReverseIterate()
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
iterator_range(Container &&) -> iterator_range< llvm::detail::IterOfRange< Container > >
@ Default
The result values are uniform if and only if all operands are uniform.
Definition Uniformity.h:20
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
Definition MathExtras.h:384
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
A suitably aligned and sized character array member which can hold elements of any type.
Definition AlignOf.h:22
std::conditional_t< std::is_pointer_v< T >, typename add_const_past_pointer< T >::type, const T & > type
Definition type_traits.h:54
const ValueT & getSecond() const
Definition DenseMap.h:51
const KeyT & getFirst() const
Definition DenseMap.h:49