23 #ifndef KOKKOS_UNORDERED_MAP_HPP
24 #define KOKKOS_UNORDERED_MAP_HPP
25 #ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
26 #define KOKKOS_IMPL_PUBLIC_INCLUDE
27 #define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_UNORDEREDMAP
30 #include <Kokkos_Core.hpp>
31 #include <Kokkos_Functional.hpp>
33 #include <Kokkos_Bitset.hpp>
35 #include <impl/Kokkos_Traits.hpp>
36 #include <impl/Kokkos_UnorderedMap_impl.hpp>
37 #include <View/Kokkos_ViewCtor.hpp>
41 #ifndef KOKKOS_ENABLE_DEPRECATED_CODE_4
42 #if defined(KOKKOS_COMPILER_GNU) && !defined(__PGIC__) && \
43 !defined(__CUDA_ARCH__)
45 #define KOKKOS_IMPL_NONTEMPORAL_PREFETCH_LOAD(addr) \
46 __builtin_prefetch(addr, 0, 0)
47 #define KOKKOS_IMPL_NONTEMPORAL_PREFETCH_STORE(addr) \
48 __builtin_prefetch(addr, 1, 0)
52 #define KOKKOS_IMPL_NONTEMPORAL_PREFETCH_LOAD(addr) ((void)0)
53 #define KOKKOS_IMPL_NONTEMPORAL_PREFETCH_STORE(addr) ((void)0)
57 #define KOKKOS_IMPL_NONTEMPORAL_PREFETCH_LOAD(addr) \
58 KOKKOS_NONTEMPORAL_PREFETCH_LOAD(addr)
59 #define KOKKOS_IMPL_NONTEMPORAL_PREFETCH_STORE(addr) \
60 KOKKOS_NONTEMPORAL_PREFETCH_STORE(addr)
65 enum :
unsigned { UnorderedMapInvalidIndex = ~0u };
83 enum Status : uint32_t {
86 FREED_EXISTING = 1u << 29,
87 LIST_LENGTH_MASK = ~(SUCCESS | EXISTING | FREED_EXISTING)
92 KOKKOS_FORCEINLINE_FUNCTION
93 bool success()
const {
return (m_status & SUCCESS); }
96 KOKKOS_FORCEINLINE_FUNCTION
97 bool existing()
const {
return (m_status & EXISTING); }
100 KOKKOS_FORCEINLINE_FUNCTION
101 bool failed()
const {
return m_index == UnorderedMapInvalidIndex; }
105 KOKKOS_FORCEINLINE_FUNCTION
110 KOKKOS_FORCEINLINE_FUNCTION
114 KOKKOS_FORCEINLINE_FUNCTION
115 uint32_t
index()
const {
return m_index; }
117 KOKKOS_FORCEINLINE_FUNCTION
120 KOKKOS_FORCEINLINE_FUNCTION
121 void increment_list_position() {
125 KOKKOS_FORCEINLINE_FUNCTION
126 void set_existing(uint32_t i,
bool arg_freed_existing) {
129 EXISTING | (arg_freed_existing ? FREED_EXISTING : 0u) |
list_position();
132 KOKKOS_FORCEINLINE_FUNCTION
133 void set_success(uint32_t i) {
157 template <
class ValueTypeView,
class ValuesIdxType>
159 using value_type =
typename ValueTypeView::non_const_value_type;
162 void op(ValueTypeView, ValuesIdxType,
const value_type)
const {}
166 void op(ValueTypeView values, ValuesIdxType values_idx,
167 const value_type v)
const {
168 Kokkos::atomic_add(values.data() + values_idx, v);
228 template <
typename Key,
typename Value,
229 typename Device = Kokkos::DefaultExecutionSpace,
230 typename Hasher = pod_hash<std::remove_const_t<Key>>,
231 typename EqualTo = pod_equal_to<std::remove_const_t<Key>>>
234 using host_mirror_space =
235 typename ViewTraits<Key, Device, void, void>::host_mirror_space;
241 using declared_key_type = Key;
242 using key_type = std::remove_const_t<declared_key_type>;
243 using const_key_type = std::add_const_t<key_type>;
246 using declared_value_type = Value;
247 using value_type = std::remove_const_t<declared_value_type>;
248 using const_value_type = std::add_const_t<value_type>;
250 using device_type = Device;
251 using execution_space =
typename Device::execution_space;
252 using hasher_type = Hasher;
253 using equal_to_type = EqualTo;
254 using size_type = uint32_t;
258 UnorderedMap<declared_key_type, declared_value_type, device_type,
259 hasher_type, equal_to_type>;
261 hasher_type, equal_to_type>;
263 UnorderedMap<const_key_type, value_type, device_type, hasher_type,
266 device_type, hasher_type, equal_to_type>;
268 static constexpr
bool is_set = std::is_void_v<value_type>;
269 static constexpr
bool has_const_key =
270 std::is_same_v<const_key_type, declared_key_type>;
271 static constexpr
bool has_const_value =
272 is_set || std::is_same_v<const_value_type, declared_value_type>;
274 static constexpr
bool is_insertable_map =
275 !has_const_key && (is_set || !has_const_value);
276 static constexpr
bool is_modifiable_map = has_const_key && !has_const_value;
277 static constexpr
bool is_const_map = has_const_key && has_const_value;
284 using histogram_type = Impl::UnorderedMapHistogram<const_map_type>;
288 enum : size_type { invalid_index = ~static_cast<size_type>(0) };
290 using impl_value_type = std::conditional_t<is_set, int, declared_value_type>;
292 using key_type_view = std::conditional_t<
293 is_insertable_map, View<key_type *, device_type>,
294 View<const key_type *, device_type, MemoryTraits<RandomAccess>>>;
296 using value_type_view = std::conditional_t<
297 is_insertable_map || is_modifiable_map,
298 View<impl_value_type *, device_type>,
299 View<const impl_value_type *, device_type, MemoryTraits<RandomAccess>>>;
301 using size_type_view = std::conditional_t<
302 is_insertable_map, View<size_type *, device_type>,
303 View<const size_type *, device_type, MemoryTraits<RandomAccess>>>;
305 using bitset_type = std::conditional_t<is_insertable_map, Bitset<Device>,
308 enum { modified_idx = 0, erasable_idx = 1, failed_insert_idx = 2 };
309 enum { num_scalars = 3 };
310 using scalars_view = View<int[num_scalars], LayoutLeft, device_type>;
315 using default_op_type =
/// @brief Construct a map sized for at least @p capacity_hint entries.
/// Delegates to the ViewCtorProp overload with default (empty) allocation
/// properties, so the default label and initialization behavior apply.
326 UnorderedMap(size_type capacity_hint = 0, hasher_type hasher = hasher_type(),
327 equal_to_type equal_to = equal_to_type())
328 :
UnorderedMap(Kokkos::view_alloc(), capacity_hint, hasher, equal_to) {}
330 template <
class... P>
332 size_type capacity_hint = 0, hasher_type hasher = hasher_type(),
333 equal_to_type equal_to = equal_to_type())
334 : m_bounded_insert(true), m_hasher(hasher), m_equal_to(equal_to) {
335 if (!is_insertable_map) {
336 Kokkos::Impl::throw_runtime_exception(
337 "Cannot construct a non-insertable (i.e. const key_type) "
342 using alloc_prop_t = std::decay_t<decltype(arg_prop)>;
343 static_assert(alloc_prop_t::initialize,
344 "Allocation property 'initialize' should be true.");
346 !alloc_prop_t::has_pointer,
347 "Allocation properties should not contain the 'pointer' property.");
351 const auto prop_copy =
352 Impl::with_properties_if_unset(arg_prop, std::string(
"UnorderedMap"));
353 const auto prop_copy_noinit =
354 Impl::with_properties_if_unset(prop_copy, Kokkos::WithoutInitializing);
357 m_size = shared_size_t(Kokkos::view_alloc(
358 Kokkos::DefaultHostExecutionSpace{},
359 Impl::get_property<Impl::LabelTag>(prop_copy) +
" - size"));
361 m_available_indexes =
362 bitset_type(Kokkos::Impl::append_to_label(prop_copy,
" - bitset"),
363 calculate_capacity(capacity_hint));
365 m_hash_lists = size_type_view(
366 Kokkos::Impl::append_to_label(prop_copy_noinit,
" - hash list"),
367 Impl::find_hash_size(capacity()));
369 m_next_index = size_type_view(
370 Kokkos::Impl::append_to_label(prop_copy_noinit,
" - next index"),
374 m_keys = key_type_view(Kokkos::Impl::append_to_label(prop_copy,
" - keys"),
378 value_type_view(Kokkos::Impl::append_to_label(prop_copy,
" - values"),
379 is_set ? 0 : capacity());
382 scalars_view(Kokkos::Impl::append_to_label(prop_copy,
" - scalars"));
390 if constexpr (alloc_prop_t::has_execution_space) {
391 const auto &space = Impl::get_property<Impl::ExecutionSpaceTag>(arg_prop);
392 Kokkos::deep_copy(space, m_hash_lists, invalid_index);
393 Kokkos::deep_copy(space, m_next_index, invalid_index);
395 Kokkos::deep_copy(m_hash_lists, invalid_index);
396 Kokkos::deep_copy(m_next_index, invalid_index);
/// @brief Clear the flag recording that some insert() call failed.
400 void reset_failed_insert_flag() { reset_flag(failed_insert_idx); }
/// @brief Create a histogram helper bound to this map (useful for
/// inspecting hash-list length distribution).
402 histogram_type get_histogram() {
return histogram_type(*
this); }
406 m_bounded_insert =
true;
408 if (capacity() == 0)
return;
410 m_available_indexes.clear();
412 Kokkos::deep_copy(m_hash_lists, invalid_index);
413 Kokkos::deep_copy(m_next_index, invalid_index);
415 const key_type tmp = key_type();
416 Kokkos::deep_copy(m_keys, tmp);
418 Kokkos::deep_copy(m_scalars, 0);
422 KOKKOS_INLINE_FUNCTION constexpr
bool is_allocated()
const {
423 return (m_keys.is_allocated() && (is_set || m_values.is_allocated()) &&
424 m_scalars.is_allocated());
437 bool rehash(size_type requested_capacity = 0) {
438 const bool bounded_insert = (capacity() == 0) || (size() == 0u);
439 return rehash(requested_capacity, bounded_insert);
442 bool rehash(size_type requested_capacity,
bool bounded_insert) {
443 if (!is_insertable_map)
return false;
445 const size_type curr_size = size();
447 (requested_capacity < curr_size) ? curr_size : requested_capacity;
449 insertable_map_type tmp(requested_capacity, m_hasher, m_equal_to);
452 tmp.m_bounded_insert =
false;
453 Impl::UnorderedMapRehash<insertable_map_type> f(tmp, *
this);
456 tmp.m_bounded_insert = bounded_insert;
471 if (capacity() == 0u)
return 0u;
473 m_size() = m_available_indexes.count();
474 reset_flag(modified_idx);
486 bool erasable()
const {
487 return is_insertable_map ? get_flag(erasable_idx) : false;
491 bool result = !erasable();
492 if (is_insertable_map && result) {
493 execution_space().fence(
494 "Kokkos::UnorderedMap::begin_erase: fence before setting erasable "
496 set_flag(erasable_idx);
502 bool result = erasable();
503 if (is_insertable_map && result) {
504 execution_space().fence(
505 "Kokkos::UnorderedMap::end_erase: fence before erasing");
506 Impl::UnorderedMapErase<declared_map_type> f(*
this);
508 execution_space().fence(
509 "Kokkos::UnorderedMap::end_erase: fence after erasing");
510 reset_flag(erasable_idx);
/// @brief The maximum number of entries the table can hold.
/// Equals the size of the availability bitset, which tracks one bit per
/// storage slot.
519 KOKKOS_FORCEINLINE_FUNCTION
520 size_type
capacity()
const {
return m_available_indexes.size(); }
532 KOKKOS_INLINE_FUNCTION
549 template <
typename InsertOpType = default_op_type>
550 KOKKOS_INLINE_FUNCTION insert_result
551 insert(key_type
const &k, impl_value_type
const &v = impl_value_type(),
552 [[maybe_unused]] InsertOpType arg_insert_op = InsertOpType())
const {
553 if constexpr (is_set) {
554 static_assert(std::is_same_v<InsertOpType, default_op_type>,
555 "Insert Operations are not supported on sets.");
560 if (!is_insertable_map || capacity() == 0u ||
561 m_scalars((
int)erasable_idx)) {
565 if (!m_scalars((
int)modified_idx)) {
566 m_scalars((
int)modified_idx) =
true;
569 int volatile &failed_insert_ref = m_scalars((
int)failed_insert_idx);
571 const size_type hash_value = m_hasher(k);
572 const size_type hash_list = hash_value % m_hash_lists.extent(0);
574 size_type *curr_ptr = &m_hash_lists[hash_list];
575 size_type new_index = invalid_index;
578 size_type index_hint =
static_cast<size_type
>(
579 (
static_cast<double>(hash_list) * capacity()) / m_hash_lists.extent(0));
581 size_type find_attempts = 0;
583 enum :
unsigned { bounded_find_attempts = 32u };
584 const size_type max_attempts =
586 (bounded_find_attempts < m_available_indexes.max_hint()))
587 ? bounded_find_attempts
588 : m_available_indexes.max_hint();
590 bool not_done =
true;
601 #ifdef KOKKOS_ENABLE_SYCL
602 size_type curr = Kokkos::atomic_load(curr_ptr);
604 size_type curr = volatile_load(curr_ptr);
607 KOKKOS_IMPL_NONTEMPORAL_PREFETCH_LOAD(
608 &m_keys[curr != invalid_index ? curr : 0]);
612 while (curr != invalid_index && !m_equal_to(
613 #ifdef KOKKOS_ENABLE_SYCL
614 Kokkos::atomic_load(&m_keys[curr])
616 volatile_load(&m_keys[curr])
620 result.increment_list_position();
622 curr_ptr = &m_next_index[curr];
623 #ifdef KOKKOS_ENABLE_SYCL
624 curr = Kokkos::atomic_load(curr_ptr);
626 curr = volatile_load(curr_ptr);
628 KOKKOS_IMPL_NONTEMPORAL_PREFETCH_LOAD(
629 &m_keys[curr != invalid_index ? curr : 0]);
634 if (curr != invalid_index) {
635 const bool free_existing = new_index != invalid_index;
639 if (!m_available_indexes.reset(new_index)) {
640 Kokkos::printf(
"Unable to free existing\n");
644 result.set_existing(curr, free_existing);
645 if constexpr (!is_set) {
646 arg_insert_op.op(m_values, curr, v);
656 if (new_index == invalid_index) {
659 Kokkos::tie(found, index_hint) =
660 m_available_indexes.find_any_unset_near(index_hint, hash_list);
663 if (!found && ++find_attempts >= max_attempts) {
664 failed_insert_ref =
true;
666 }
else if (m_available_indexes.set(index_hint)) {
667 new_index = index_hint;
669 KOKKOS_IMPL_NONTEMPORAL_PREFETCH_STORE(&m_keys[new_index]);
671 #ifdef KOKKOS_ENABLE_SYCL
672 Kokkos::atomic_store(&m_keys[new_index], k);
674 m_keys[new_index] = k;
678 KOKKOS_IMPL_NONTEMPORAL_PREFETCH_STORE(&m_values[new_index]);
679 #ifdef KOKKOS_ENABLE_SYCL
680 Kokkos::atomic_store(&m_values[new_index], v);
682 m_values[new_index] = v;
686 #ifndef KOKKOS_ENABLE_SYCL
691 }
else if (failed_insert_ref) {
698 if (new_index != invalid_index &&
699 curr == atomic_compare_exchange(
700 curr_ptr, static_cast<size_type>(invalid_index),
703 result.set_success(new_index);
712 KOKKOS_INLINE_FUNCTION
713 bool erase(key_type
const &k)
const {
716 if (is_insertable_map && 0u < capacity() && m_scalars((
int)erasable_idx)) {
717 if (!m_scalars((
int)modified_idx)) {
718 m_scalars((
int)modified_idx) =
true;
721 size_type
index = find(k);
722 if (valid_at(index)) {
723 m_available_indexes.reset(index);
738 KOKKOS_INLINE_FUNCTION
739 size_type
find(
const key_type &k)
const {
740 size_type curr = 0u < capacity()
741 ? m_hash_lists(m_hasher(k) % m_hash_lists.extent(0))
744 KOKKOS_IMPL_NONTEMPORAL_PREFETCH_LOAD(
745 &m_keys[curr != invalid_index ? curr : 0]);
746 while (curr != invalid_index && !m_equal_to(m_keys[curr], k)) {
747 KOKKOS_IMPL_NONTEMPORAL_PREFETCH_LOAD(
748 &m_keys[curr != invalid_index ? curr : 0]);
749 curr = m_next_index[curr];
/// @brief Does the key exist in the map?
/// A find() that fails returns an index for which valid_at() is false.
759 KOKKOS_INLINE_FUNCTION
760 bool exists(
const key_type &k)
const {
return valid_at(find(k)); }
770 template <
typename Dummy = value_type>
771 KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
772 !std::is_void_v<Dummy>,
773 std::conditional_t<has_const_value, impl_value_type, impl_value_type &>>
775 KOKKOS_EXPECTS(i < capacity());
785 KOKKOS_FORCEINLINE_FUNCTION
787 KOKKOS_EXPECTS(i < capacity());
/// @brief Is slot @p i currently occupied? Tests the availability bitset.
791 KOKKOS_FORCEINLINE_FUNCTION
792 bool valid_at(size_type i)
const {
return m_available_indexes.test(i); }
794 template <
typename SKey,
typename SValue>
796 UnorderedMap<SKey, SValue, Device, Hasher, EqualTo>
const &src,
798 Impl::UnorderedMapCanAssign<declared_key_type, declared_value_type,
799 SKey, SValue>::value,
801 : m_bounded_insert(src.m_bounded_insert),
802 m_hasher(src.m_hasher),
803 m_equal_to(src.m_equal_to),
805 m_available_indexes(src.m_available_indexes),
806 m_hash_lists(src.m_hash_lists),
807 m_next_index(src.m_next_index),
809 m_values(src.m_values),
810 m_scalars(src.m_scalars) {}
812 template <
typename SKey,
typename SValue>
814 Impl::UnorderedMapCanAssign<declared_key_type, declared_value_type, SKey,
817 operator=(UnorderedMap<SKey, SValue, Device, Hasher, EqualTo>
const &src) {
818 m_bounded_insert = src.m_bounded_insert;
819 m_hasher = src.m_hasher;
820 m_equal_to = src.m_equal_to;
822 m_available_indexes = src.m_available_indexes;
823 m_hash_lists = src.m_hash_lists;
824 m_next_index = src.m_next_index;
826 m_values = src.m_values;
827 m_scalars = src.m_scalars;
833 template <
typename SKey,
typename SValue,
typename SDevice>
834 std::enable_if_t<std::is_same_v<std::remove_const_t<SKey>, key_type> &&
835 std::is_same_v<std::remove_const_t<SValue>, value_type>>
837 UnorderedMap<SKey, SValue, SDevice, Hasher, EqualTo>
const &src) {
838 if (m_hash_lists.data() != src.m_hash_lists.data()) {
846 template <
typename SKey,
typename SValue,
typename SDevice>
847 std::enable_if_t<std::is_same_v<std::remove_const_t<SKey>, key_type> &&
848 std::is_same_v<std::remove_const_t<SValue>, value_type>>
850 UnorderedMap<SKey, SValue, SDevice, Hasher, EqualTo>
const &src) {
851 insertable_map_type tmp;
853 tmp.m_bounded_insert = src.m_bounded_insert;
854 tmp.m_hasher = src.m_hasher;
855 tmp.m_equal_to = src.m_equal_to;
856 tmp.m_size() = src.m_size();
857 tmp.m_available_indexes = bitset_type(src.capacity());
858 tmp.m_hash_lists = size_type_view(
859 view_alloc(WithoutInitializing,
"UnorderedMap hash list"),
860 src.m_hash_lists.extent(0));
861 tmp.m_next_index = size_type_view(
862 view_alloc(WithoutInitializing,
"UnorderedMap next index"),
863 src.m_next_index.extent(0));
865 key_type_view(view_alloc(WithoutInitializing,
"UnorderedMap keys"),
866 src.m_keys.extent(0));
868 value_type_view(view_alloc(WithoutInitializing,
"UnorderedMap values"),
869 src.m_values.extent(0));
870 tmp.m_scalars = scalars_view(
"UnorderedMap scalars");
877 template <
typename SKey,
typename SValue,
typename SDevice>
878 std::enable_if_t<std::is_same_v<std::remove_const_t<SKey>, key_type> &&
879 std::is_same_v<std::remove_const_t<SValue>, value_type>>
881 UnorderedMap<SKey, SValue, SDevice, Hasher, EqualTo>
const &src) {
882 #ifndef KOKKOS_ENABLE_DEPRECATED_CODE_4
884 KOKKOS_EXPECTS(capacity() == src.capacity());
886 if (capacity() != src.capacity()) {
888 #ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
889 Kokkos::Impl::log_warning(
890 "Warning: deep_copy_view() allocating views is deprecated. Must call "
891 "with UnorderedMaps of identical capacity, or use "
892 "create_copy_view().\n");
897 if (m_hash_lists.data() != src.m_hash_lists.data()) {
898 Kokkos::deep_copy(m_available_indexes, src.m_available_indexes);
901 typename device_type::execution_space exec_space{};
903 Kokkos::deep_copy(exec_space, m_hash_lists, src.m_hash_lists);
904 Kokkos::deep_copy(exec_space, m_next_index, src.m_next_index);
905 Kokkos::deep_copy(exec_space, m_keys, src.m_keys);
907 Kokkos::deep_copy(exec_space, m_values, src.m_values);
909 Kokkos::deep_copy(exec_space, m_scalars, src.m_scalars);
912 "Kokkos::UnorderedMap::deep_copy_view: fence after copy to dst.");
/// @brief Has the map been modified since the last size() recomputation?
918 bool modified()
const {
return get_flag(modified_idx); }
920 void set_flag(
int flag)
const {
921 auto scalar = Kokkos::subview(m_scalars, flag);
922 Kokkos::deep_copy(
typename device_type::execution_space{}, scalar,
923 static_cast<int>(
true));
925 "Kokkos::UnorderedMap::set_flag: fence after copying flag from "
929 void reset_flag(
int flag)
const {
930 auto scalar = Kokkos::subview(m_scalars, flag);
931 Kokkos::deep_copy(
typename device_type::execution_space{}, scalar,
932 static_cast<int>(
false));
934 "Kokkos::UnorderedMap::reset_flag: fence after copying flag from "
938 bool get_flag(
int flag)
const {
939 const auto scalar = Kokkos::subview(m_scalars, flag);
941 Kokkos::deep_copy(
typename device_type::execution_space{}, result, scalar);
943 "Kokkos::UnorderedMap::get_flag: fence after copy to return value in "
948 static uint32_t calculate_capacity(uint32_t capacity_hint) {
951 ? ((
static_cast<uint32_t
>(7ull * capacity_hint / 6u) + 127u) /
958 bool m_bounded_insert;
959 hasher_type m_hasher;
960 equal_to_type m_equal_to;
961 using shared_size_t = View<size_type, Kokkos::DefaultHostExecutionSpace>;
962 shared_size_t m_size;
963 bitset_type m_available_indexes;
964 size_type_view m_hash_lists;
965 size_type_view m_next_index;
966 key_type_view m_keys;
967 value_type_view m_values;
968 scalars_view m_scalars;
970 template <
typename KKey,
typename VValue,
typename DDevice,
typename HHash,
972 friend class UnorderedMap;
974 template <
typename UMap>
975 friend struct Impl::UnorderedMapErase;
977 template <
typename UMap>
978 friend struct Impl::UnorderedMapHistogram;
980 template <
typename UMap>
981 friend struct Impl::UnorderedMapPrint;
985 template <
typename DKey,
typename DT,
typename DDevice,
typename SKey,
986 typename ST,
typename SDevice,
typename Hasher,
typename EqualTo>
987 inline void deep_copy(
988 UnorderedMap<DKey, DT, DDevice, Hasher, EqualTo> &dst,
989 const UnorderedMap<SKey, ST, SDevice, Hasher, EqualTo> &src) {
990 dst.deep_copy_view(src);
994 template <
typename Key,
typename ValueType,
typename Device,
typename Hasher,
996 typename UnorderedMap<Key, ValueType, Device, Hasher, EqualTo>::HostMirror
998 const UnorderedMap<Key, ValueType, Device, Hasher, EqualTo> &src) {
999 typename UnorderedMap<Key, ValueType, Device, Hasher, EqualTo>::HostMirror
1001 dst.allocate_view(src);
1007 #ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_UNORDEREDMAP
1008 #undef KOKKOS_IMPL_PUBLIC_INCLUDE
1009 #undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_UNORDEREDMAP
1011 #endif // KOKKOS_UNORDERED_MAP_HPP
KOKKOS_FORCEINLINE_FUNCTION bool success() const
Did the map successfully insert the key/value pair.
KOKKOS_FORCEINLINE_FUNCTION size_type capacity() const
The maximum number of entries that the table can hold.
KOKKOS_FORCEINLINE_FUNCTION uint32_t list_position() const
KOKKOS_INLINE_FUNCTION insert_result insert(key_type const &k, impl_value_type const &v=impl_value_type(), [[maybe_unused]] InsertOpType arg_insert_op=InsertOpType()) const
UnorderedMap(size_type capacity_hint=0, hasher_type hasher=hasher_type(), equal_to_type equal_to=equal_to_type())
Constructor.
KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t< !std::is_void_v< Dummy >, std::conditional_t< has_const_value, impl_value_type, impl_value_type & > > value_at(size_type i) const
Get the value with i as its direct index.
size_type size() const
The number of entries in the table.
KOKKOS_FORCEINLINE_FUNCTION uint32_t index() const
Index where the key can be found as long as the insert did not fail.
Operations applied to the values array upon subsequent insertions.
void clear()
Clear all entries in the table.
KOKKOS_INLINE_FUNCTION size_type hash_capacity() const
The number of hash table "buckets".
First element of the return value of UnorderedMap::insert().
KOKKOS_FORCEINLINE_FUNCTION bool freed_existing() const
bool rehash(size_type requested_capacity=0)
Change the capacity of the map.
KOKKOS_INLINE_FUNCTION bool exists(const key_type &k) const
Does the key exist in the map.
bool failed_insert() const
The current number of failed insert() calls.
KOKKOS_FORCEINLINE_FUNCTION key_type key_at(size_type i) const
Get the key with i as its direct index.
KOKKOS_INLINE_FUNCTION size_type find(const key_type &k) const
Find the given key k, if it exists in the table.
Thread-safe, performance-portable lookup table.
KOKKOS_FORCEINLINE_FUNCTION bool failed() const
Did the map fail to insert the key due to insufficient capacity.
KOKKOS_FORCEINLINE_FUNCTION bool existing() const
Was the key already present in the map.
UnorderedMap(const Impl::ViewCtorProp< P...> &arg_prop, size_type capacity_hint=0, hasher_type hasher=hasher_type(), equal_to_type equal_to=equal_to_type())