#ifndef KOKKOS_UNORDERED_MAP_HPP
#define KOKKOS_UNORDERED_MAP_HPP
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#define KOKKOS_IMPL_PUBLIC_INCLUDE
#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_UNORDEREDMAP
#endif

#include <Kokkos_Core.hpp>
#include <Kokkos_Functional.hpp>

#include <Kokkos_Bitset.hpp>

#include <impl/Kokkos_Traits.hpp>
#include <impl/Kokkos_UnorderedMap_impl.hpp>
#include <impl/Kokkos_ViewCtor.hpp>

namespace Kokkos {

enum : unsigned { UnorderedMapInvalidIndex = ~0u };
/// \brief First element of the return value of UnorderedMap::insert().
class UnorderedMapInsertResult {
 private:
  enum Status : uint32_t {
    SUCCESS          = 1u << 31,
    EXISTING         = 1u << 30,
    FREED_EXISTING   = 1u << 29,
    LIST_LENGTH_MASK = ~(SUCCESS | EXISTING | FREED_EXISTING)
  };

 public:
  /// \brief Did the map successfully insert the key/value pair?
  KOKKOS_FORCEINLINE_FUNCTION
  bool success() const { return (m_status & SUCCESS); }

  /// \brief Was the key already present in the map?
  KOKKOS_FORCEINLINE_FUNCTION
  bool existing() const { return (m_status & EXISTING); }

  /// \brief Did the insert fail due to insufficient capacity?
  KOKKOS_FORCEINLINE_FUNCTION
  bool failed() const { return m_index == UnorderedMapInvalidIndex; }

  /// \brief Did the insert free a previously claimed, unused entry?
  KOKKOS_FORCEINLINE_FUNCTION
  bool freed_existing() const { return (m_status & FREED_EXISTING); }

  KOKKOS_FORCEINLINE_FUNCTION
  uint32_t list_position() const { return (m_status & LIST_LENGTH_MASK); }

  /// \brief Index where the key can be found, as long as the insert did not
  /// fail.
  KOKKOS_FORCEINLINE_FUNCTION
  uint32_t index() const { return m_index; }

  KOKKOS_FORCEINLINE_FUNCTION
  UnorderedMapInsertResult() : m_index(UnorderedMapInvalidIndex), m_status(0) {}

  KOKKOS_FORCEINLINE_FUNCTION
  void increment_list_position() {
    m_status += (list_position() < LIST_LENGTH_MASK) ? 1u : 0u;
  }

  KOKKOS_FORCEINLINE_FUNCTION
  void set_existing(uint32_t i, bool arg_freed_existing) {
    m_index  = i;
    m_status = EXISTING | (arg_freed_existing ? FREED_EXISTING : 0u) |
               list_position();
  }

  KOKKOS_FORCEINLINE_FUNCTION
  void set_success(uint32_t i) {
    m_index  = i;
    m_status = SUCCESS | list_position();
  }

 private:
  uint32_t m_index;
  uint32_t m_status;
};
/// \brief Operations applied to the values view upon subsequent insertions of
/// an already present key.
template <class ValueTypeView, class ValuesIdxType>
struct UnorderedMapInsertOpTypes {
  using value_type = typename ValueTypeView::non_const_value_type;

  struct NoOp {
    KOKKOS_FUNCTION
    void op(ValueTypeView, ValuesIdxType, const value_type) const {}
  };

  struct AtomicAdd {
    KOKKOS_FUNCTION
    void op(ValueTypeView values, ValuesIdxType values_idx,
            const value_type v) const {
      Kokkos::atomic_add(values.data() + values_idx, v);
    }
  };
};
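// Example (a hedged sketch, not part of the original header): accumulating
// values on duplicate-key insertion with the AtomicAdd insert operation.
// The map, the key view "keys", the count "n", and the value 1.0 are
// illustrative.
//
//   using map_type = Kokkos::UnorderedMap<int, double>;
//   using op_type  =
//       Kokkos::UnorderedMapInsertOpTypes<map_type::value_type_view,
//                                         map_type::size_type>::AtomicAdd;
//   map_type counts(10000);
//   Kokkos::parallel_for(
//       "accumulate", n, KOKKOS_LAMBDA(int i) {
//         // Adds 1.0 to the stored value when keys(i) is already present,
//         // instead of leaving it unchanged (the NoOp default).
//         counts.insert(keys(i), 1.0, op_type{});
//       });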
/// \brief Thread-safe, performance-portable lookup table.
template <typename Key, typename Value,
          typename Device  = Kokkos::DefaultExecutionSpace,
          typename Hasher  = pod_hash<std::remove_const_t<Key>>,
          typename EqualTo = pod_equal_to<std::remove_const_t<Key>>>
class UnorderedMap {
 private:
  using host_mirror_space =
      typename ViewTraits<Key, Device, void, void>::host_mirror_space;

 public:
  // key types
  using declared_key_type = Key;
  using key_type          = std::remove_const_t<declared_key_type>;
  using const_key_type    = std::add_const_t<key_type>;

  // value types
  using declared_value_type = Value;
  using value_type          = std::remove_const_t<declared_value_type>;
  using const_value_type    = std::add_const_t<value_type>;

  using device_type     = Device;
  using execution_space = typename Device::execution_space;
  using hasher_type     = Hasher;
  using equal_to_type   = EqualTo;
  using size_type       = uint32_t;

  // map types
  using declared_map_type =
      UnorderedMap<declared_key_type, declared_value_type, device_type,
                   hasher_type, equal_to_type>;
  using insertable_map_type = UnorderedMap<key_type, value_type, device_type,
                                           hasher_type, equal_to_type>;
  using modifiable_map_type =
      UnorderedMap<const_key_type, value_type, device_type, hasher_type,
                   equal_to_type>;
  using const_map_type = UnorderedMap<const_key_type, const_value_type,
                                      device_type, hasher_type, equal_to_type>;
  static constexpr bool is_set = std::is_void_v<value_type>;
  static constexpr bool has_const_key =
      std::is_same_v<const_key_type, declared_key_type>;
  static constexpr bool has_const_value =
      is_set || std::is_same_v<const_value_type, declared_value_type>;

  static constexpr bool is_insertable_map =
      !has_const_key && (is_set || !has_const_value);
  static constexpr bool is_modifiable_map = has_const_key && !has_const_value;
  static constexpr bool is_const_map      = has_const_key && has_const_value;

  //! Type of the UnorderedMap::insert() return value.
  using insert_result = UnorderedMapInsertResult;

  using HostMirror =
      UnorderedMap<Key, Value, host_mirror_space, Hasher, EqualTo>;

  using histogram_type = Impl::UnorderedMapHistogram<const_map_type>;

  enum : size_type { invalid_index = ~static_cast<size_type>(0) };

  using impl_value_type = std::conditional_t<is_set, int, declared_value_type>;

  using key_type_view = std::conditional_t<
      is_insertable_map, View<key_type *, device_type>,
      View<const key_type *, device_type, MemoryTraits<RandomAccess>>>;

  using value_type_view = std::conditional_t<
      is_insertable_map || is_modifiable_map,
      View<impl_value_type *, device_type>,
      View<const impl_value_type *, device_type, MemoryTraits<RandomAccess>>>;

  using size_type_view = std::conditional_t<
      is_insertable_map, View<size_type *, device_type>,
      View<const size_type *, device_type, MemoryTraits<RandomAccess>>>;

  using bitset_type = std::conditional_t<is_insertable_map, Bitset<Device>,
                                         ConstBitset<Device>>;

  enum { modified_idx = 0, erasable_idx = 1, failed_insert_idx = 2 };
  enum { num_scalars = 3 };

  using scalars_view = View<int[num_scalars], LayoutLeft, device_type>;

  using default_op_type =
      typename UnorderedMapInsertOpTypes<value_type_view, uint32_t>::NoOp;
  /// \brief Constructor.
  UnorderedMap(size_type capacity_hint = 0, hasher_type hasher = hasher_type(),
               equal_to_type equal_to = equal_to_type())
      : UnorderedMap(Kokkos::view_alloc(), capacity_hint, hasher, equal_to) {}

  template <class... P>
  UnorderedMap(const Impl::ViewCtorProp<P...> &arg_prop,
               size_type capacity_hint = 0, hasher_type hasher = hasher_type(),
               equal_to_type equal_to = equal_to_type())
      : m_bounded_insert(true), m_hasher(hasher), m_equal_to(equal_to) {
    if (!is_insertable_map) {
      Kokkos::Impl::throw_runtime_exception(
          "Cannot construct a non-insertable (i.e. const key_type) "
          "unordered_map");
    }

    using alloc_prop_t = std::decay_t<decltype(arg_prop)>;
    static_assert(alloc_prop_t::initialize,
                  "Allocation property 'initialize' should be true.");
    static_assert(
        !alloc_prop_t::has_pointer,
        "Allocation properties should not contain the 'pointer' property.");

    // Add a default label and the WithoutInitializing property if not set.
    const auto prop_copy =
        Impl::with_properties_if_unset(arg_prop, std::string("UnorderedMap"));
    const auto prop_copy_noinit =
        Impl::with_properties_if_unset(prop_copy, Kokkos::WithoutInitializing);
    m_size = shared_size_t(Kokkos::view_alloc(
        Kokkos::DefaultHostExecutionSpace{},
        Impl::get_property<Impl::LabelTag>(prop_copy) + " - size"));

    m_available_indexes =
        bitset_type(Kokkos::Impl::append_to_label(prop_copy, " - bitset"),
                    calculate_capacity(capacity_hint));

    m_hash_lists = size_type_view(
        Kokkos::Impl::append_to_label(prop_copy_noinit, " - hash list"),
        Impl::find_hash_size(capacity()));

    m_next_index = size_type_view(
        Kokkos::Impl::append_to_label(prop_copy_noinit, " - next index"),
        capacity() + 1);  // +1 so that the *_at functions can always return a
                          // valid reference

    m_keys = key_type_view(Kokkos::Impl::append_to_label(prop_copy, " - keys"),
                           capacity());

    m_values =
        value_type_view(Kokkos::Impl::append_to_label(prop_copy, " - values"),
                        is_set ? 0 : capacity());

    m_scalars =
        scalars_view(Kokkos::Impl::append_to_label(prop_copy, " - scalars"));
    // Initialize the hash lists and next indices, using the supplied
    // execution space instance if one was given.
    if constexpr (alloc_prop_t::has_execution_space) {
      const auto &space = Impl::get_property<Impl::ExecutionSpaceTag>(arg_prop);
      Kokkos::deep_copy(space, m_hash_lists, invalid_index);
      Kokkos::deep_copy(space, m_next_index, invalid_index);
    } else {
      Kokkos::deep_copy(m_hash_lists, invalid_index);
      Kokkos::deep_copy(m_next_index, invalid_index);
    }
  }
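  // Example (a hedged sketch, not part of the original header): constructing
  // a map, optionally routing a label and an execution space instance through
  // view_alloc().  The label "my map" and the instance "exec" are
  // illustrative.
  //
  //   Kokkos::UnorderedMap<int, double> map(10000);
  //
  //   Kokkos::DefaultExecutionSpace exec;
  //   Kokkos::UnorderedMap<int, double> labeled_map(
  //       Kokkos::view_alloc(exec, "my map"), 10000);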
  void reset_failed_insert_flag() { reset_flag(failed_insert_idx); }

  histogram_type get_histogram() { return histogram_type(*this); }

  /// \brief Clear all entries in the table.
  void clear() {
    m_bounded_insert = true;

    if (capacity() == 0) return;

    m_available_indexes.clear();

    Kokkos::deep_copy(m_hash_lists, invalid_index);
    Kokkos::deep_copy(m_next_index, invalid_index);
    {
      const key_type tmp = key_type();
      Kokkos::deep_copy(m_keys, tmp);
    }
    Kokkos::deep_copy(m_scalars, 0);
  }
  KOKKOS_INLINE_FUNCTION constexpr bool is_allocated() const {
    return (m_keys.is_allocated() && (is_set || m_values.is_allocated()) &&
            m_scalars.is_allocated());
  }
  /// \brief Change the capacity of the map.
  bool rehash(size_type requested_capacity = 0) {
    const bool bounded_insert = (capacity() == 0) || (size() == 0u);
    return rehash(requested_capacity, bounded_insert);
  }

  bool rehash(size_type requested_capacity, bool bounded_insert) {
    if (!is_insertable_map) return false;

    const size_type curr_size = size();
    requested_capacity =
        (requested_capacity < curr_size) ? curr_size : requested_capacity;

    insertable_map_type tmp(requested_capacity, m_hasher, m_equal_to);

    if (curr_size) {
      tmp.m_bounded_insert = false;
      Impl::UnorderedMapRehash<insertable_map_type> f(tmp, *this);
      f.apply();
    }
    tmp.m_bounded_insert = bounded_insert;

    *this = tmp;

    return true;
  }
  /// \brief The current number of entries in the table.
  size_type size() const {
    if (capacity() == 0u) return 0u;
    if (modified()) {
      m_size() = m_available_indexes.count();
      reset_flag(modified_idx);
    }
    return m_size();
  }
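  // Example (a hedged sketch): host-side capacity management.  The numbers
  // are illustrative; failed_insert() reports whether any insert() call ran
  // out of capacity.
  //
  //   Kokkos::UnorderedMap<int, double> map(1024);
  //   // ... fill the map from a parallel kernel ...
  //   if (map.failed_insert()) {
  //     map.rehash(2 * map.capacity());  // grow, re-inserting existing entries
  //     // ... re-run the kernel for the entries that failed ...
  //   }
  //   printf("%u of %u slots used\n", map.size(), map.capacity());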
  bool erasable() const {
    return is_insertable_map ? get_flag(erasable_idx) : false;
  }

  bool begin_erase() {
    bool result = !erasable();
    if (is_insertable_map && result) {
      execution_space().fence(
          "Kokkos::UnorderedMap::begin_erase: fence before setting erasable "
          "flag");
      set_flag(erasable_idx);
    }
    return result;
  }

  bool end_erase() {
    bool result = erasable();
    if (is_insertable_map && result) {
      execution_space().fence(
          "Kokkos::UnorderedMap::end_erase: fence before erasing");
      Impl::UnorderedMapErase<declared_map_type> f(*this);
      f.apply();
      execution_space().fence(
          "Kokkos::UnorderedMap::end_erase: fence after erasing");
      reset_flag(erasable_idx);
    }
    return result;
  }
  /// \brief The maximum number of entries that the table can hold.
  KOKKOS_FORCEINLINE_FUNCTION
  size_type capacity() const { return m_available_indexes.size(); }

  /// \brief The number of hash table "buckets".
  KOKKOS_INLINE_FUNCTION
  size_type hash_capacity() const { return m_hash_lists.extent(0); }
  /// \brief Attempt to insert the given key/value pair.
  template <typename InsertOpType = default_op_type>
  KOKKOS_INLINE_FUNCTION insert_result
  insert(key_type const &k, impl_value_type const &v = impl_value_type(),
         [[maybe_unused]] InsertOpType arg_insert_op = InsertOpType()) const {
    if constexpr (is_set) {
      static_assert(std::is_same_v<InsertOpType, default_op_type>,
                    "Insert Operations are not supported on sets.");
    }

    insert_result result;

    if (!is_insertable_map || capacity() == 0u ||
        m_scalars((int)erasable_idx)) {
      return result;
    }

    if (!m_scalars((int)modified_idx)) {
      m_scalars((int)modified_idx) = true;
    }

    int volatile &failed_insert_ref = m_scalars((int)failed_insert_idx);
    const size_type hash_value = m_hasher(k);
    const size_type hash_list  = hash_value % m_hash_lists.extent(0);

    size_type *curr_ptr = &m_hash_lists[hash_list];
    size_type new_index = invalid_index;

    // Force the integer multiply to a wider type before dividing.
    size_type index_hint = static_cast<size_type>(
        (static_cast<double>(hash_list) * capacity()) / m_hash_lists.extent(0));

    size_type find_attempts = 0;

    enum : unsigned { bounded_find_attempts = 32u };
    const size_type max_attempts =
        (m_bounded_insert &&
         (bounded_find_attempts < m_available_indexes.max_hint()))
            ? bounded_find_attempts
            : m_available_indexes.max_hint();

    bool not_done = true;

    do {
      // Continue searching the collision list for this key; the list is only
      // appended to during the insert phase.
#ifdef KOKKOS_ENABLE_SYCL
      size_type curr = Kokkos::atomic_load(curr_ptr);
#else
      size_type curr = volatile_load(curr_ptr);
#endif

      KOKKOS_NONTEMPORAL_PREFETCH_LOAD(
          &m_keys[curr != invalid_index ? curr : 0]);

      while (curr != invalid_index &&
             !m_equal_to(
#ifdef KOKKOS_ENABLE_SYCL
                 Kokkos::atomic_load(&m_keys[curr])
#else
                 volatile_load(&m_keys[curr])
#endif
                     ,
                 k)) {
        result.increment_list_position();
        curr_ptr = &m_next_index[curr];
#ifdef KOKKOS_ENABLE_SYCL
        curr = Kokkos::atomic_load(curr_ptr);
#else
        curr = volatile_load(curr_ptr);
#endif
        KOKKOS_NONTEMPORAL_PREFETCH_LOAD(
            &m_keys[curr != invalid_index ? curr : 0]);
      }
      // If the key is already present, return its index.
      if (curr != invalid_index) {
        const bool free_existing = new_index != invalid_index;
        if (free_existing) {
          // Release the unused entry claimed earlier in this call.
          if (!m_available_indexes.reset(new_index)) {
            Kokkos::printf("Unable to free existing\n");
          }
        }

        result.set_existing(curr, free_existing);
        if constexpr (!is_set) {
          arg_insert_op.op(m_values, curr, v);
        }
        not_done = false;
      }
      // The key is not currently in the map: if this thread has not yet
      // claimed an unused entry, try to claim one now.
      else {
        if (new_index == invalid_index) {
          bool found = false;
          Kokkos::tie(found, index_hint) =
              m_available_indexes.find_any_unset_near(index_hint, hash_list);

          if (!found && ++find_attempts >= max_attempts) {
            failed_insert_ref = true;
            not_done          = false;
          } else if (m_available_indexes.set(index_hint)) {
            new_index = index_hint;
            // Set the key and value for the claimed entry.
            KOKKOS_NONTEMPORAL_PREFETCH_STORE(&m_keys[new_index]);
#ifdef KOKKOS_ENABLE_SYCL
            Kokkos::atomic_store(&m_keys[new_index], k);
#else
            m_keys[new_index] = k;
#endif

            if constexpr (!is_set) {
              KOKKOS_NONTEMPORAL_PREFETCH_STORE(&m_values[new_index]);
#ifdef KOKKOS_ENABLE_SYCL
              Kokkos::atomic_store(&m_values[new_index], v);
#else
              m_values[new_index] = v;
#endif
            }
#ifndef KOKKOS_ENABLE_SYCL
            // Do not proceed until the key and value are visible globally.
            memory_fence();
#endif
          }
        } else if (failed_insert_ref) {
          not_done = false;
        }
        // Attempt to append the claimed entry to the list.
        if (new_index != invalid_index &&
            curr == atomic_compare_exchange(
                        curr_ptr, static_cast<size_type>(invalid_index),
                        new_index)) {
          result.set_success(new_index);
          not_done = false;
        }
      }
    } while (not_done);

    return result;
  }
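  // Example (a hedged sketch): inserting from a device kernel and checking
  // the outcome; "keys", "values", "n", and "capacity" are illustrative.
  //
  //   Kokkos::UnorderedMap<int, double> map(capacity);
  //   Kokkos::parallel_for(
  //       "fill", n, KOKKOS_LAMBDA(int i) {
  //         auto r = map.insert(keys(i), values(i));
  //         // r.success():  a new entry was created at r.index()
  //         // r.existing(): the key was already present
  //         // r.failed():   the map ran out of capacity
  //       });
  //   if (map.failed_insert()) { /* rehash on the host and retry */ }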
  KOKKOS_INLINE_FUNCTION
  bool erase(key_type const &k) const {
    bool result = false;

    if (is_insertable_map && 0u < capacity() &&
        m_scalars((int)erasable_idx)) {
      if (!m_scalars((int)modified_idx)) {
        m_scalars((int)modified_idx) = true;
      }

      size_type index = find(k);
      if (valid_at(index)) {
        m_available_indexes.reset(index);
        result = true;
      }
    }

    return result;
  }
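  // Example (a hedged sketch): erasure is a two-phase protocol.  The map is
  // marked erasable on the host, keys are erased from device code, and
  // end_erase() finalizes the removals.  "stale_keys" and "n" are
  // illustrative.
  //
  //   map.begin_erase();
  //   Kokkos::parallel_for(
  //       "erase", n, KOKKOS_LAMBDA(int i) { map.erase(stale_keys(i)); });
  //   map.end_erase();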
  /// \brief Find the given key \c k, if it exists in the table.
  KOKKOS_INLINE_FUNCTION
  size_type find(const key_type &k) const {
    size_type curr = 0u < capacity()
                         ? m_hash_lists(m_hasher(k) % m_hash_lists.extent(0))
                         : invalid_index;

    KOKKOS_NONTEMPORAL_PREFETCH_LOAD(&m_keys[curr != invalid_index ? curr : 0]);
    while (curr != invalid_index && !m_equal_to(m_keys[curr], k)) {
      KOKKOS_NONTEMPORAL_PREFETCH_LOAD(
          &m_keys[curr != invalid_index ? curr : 0]);
      curr = m_next_index[curr];
    }

    return curr;
  }
  /// \brief Does the key exist in the map?
  KOKKOS_INLINE_FUNCTION
  bool exists(const key_type &k) const { return valid_at(find(k)); }
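  // Example (a hedged sketch): device-side lookup.  find() returns a direct
  // index into the map's storage and valid_at() tells whether that index
  // refers to a live entry; "keys", "sums", and "n" are illustrative.
  //
  //   Kokkos::parallel_for(
  //       "lookup", n, KOKKOS_LAMBDA(int i) {
  //         const auto idx = map.find(keys(i));
  //         if (map.valid_at(idx)) sums(i) += map.value_at(idx);
  //       });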
  /// \brief Get the value with \c i as its direct index.
  template <typename Dummy = value_type>
  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
      !std::is_void<Dummy>::value,  // !is_set
      std::conditional_t<has_const_value, impl_value_type, impl_value_type &>>
  value_at(size_type i) const {
    KOKKOS_EXPECTS(i < capacity());
    return m_values[i];
  }

  /// \brief Get the key with \c i as its direct index.
  KOKKOS_FORCEINLINE_FUNCTION
  key_type key_at(size_type i) const {
    KOKKOS_EXPECTS(i < capacity());
    return m_keys[i];
  }
  KOKKOS_FORCEINLINE_FUNCTION
  bool valid_at(size_type i) const { return m_available_indexes.test(i); }
  template <typename SKey, typename SValue>
  UnorderedMap(
      UnorderedMap<SKey, SValue, Device, Hasher, EqualTo> const &src,
      std::enable_if_t<
          Impl::UnorderedMapCanAssign<declared_key_type, declared_value_type,
                                      SKey, SValue>::value,
          int> = 0)
      : m_bounded_insert(src.m_bounded_insert),
        m_hasher(src.m_hasher),
        m_equal_to(src.m_equal_to),
        m_size(src.m_size),
        m_available_indexes(src.m_available_indexes),
        m_hash_lists(src.m_hash_lists),
        m_next_index(src.m_next_index),
        m_keys(src.m_keys),
        m_values(src.m_values),
        m_scalars(src.m_scalars) {}
  template <typename SKey, typename SValue>
  std::enable_if_t<
      Impl::UnorderedMapCanAssign<declared_key_type, declared_value_type, SKey,
                                  SValue>::value,
      declared_map_type &>
  operator=(UnorderedMap<SKey, SValue, Device, Hasher, EqualTo> const &src) {
    m_bounded_insert    = src.m_bounded_insert;
    m_hasher            = src.m_hasher;
    m_equal_to          = src.m_equal_to;
    m_size              = src.m_size;
    m_available_indexes = src.m_available_indexes;
    m_hash_lists        = src.m_hash_lists;
    m_next_index        = src.m_next_index;
    m_keys              = src.m_keys;
    m_values            = src.m_values;
    m_scalars           = src.m_scalars;
    return *this;
  }
  template <typename SKey, typename SValue, typename SDevice>
  std::enable_if_t<std::is_same<std::remove_const_t<SKey>, key_type>::value &&
                   std::is_same<std::remove_const_t<SValue>, value_type>::value>
  create_copy_view(
      UnorderedMap<SKey, SValue, SDevice, Hasher, EqualTo> const &src) {
    if (m_hash_lists.data() != src.m_hash_lists.data()) {
      allocate_view(src);
      deep_copy_view(src);
    }
  }
  template <typename SKey, typename SValue, typename SDevice>
  std::enable_if_t<std::is_same<std::remove_const_t<SKey>, key_type>::value &&
                   std::is_same<std::remove_const_t<SValue>, value_type>::value>
  allocate_view(
      UnorderedMap<SKey, SValue, SDevice, Hasher, EqualTo> const &src) {
    insertable_map_type tmp;

    tmp.m_bounded_insert    = src.m_bounded_insert;
    tmp.m_hasher            = src.m_hasher;
    tmp.m_equal_to          = src.m_equal_to;
    tmp.m_size()            = src.m_size();
    tmp.m_available_indexes = bitset_type(src.capacity());
    tmp.m_hash_lists        = size_type_view(
        view_alloc(WithoutInitializing, "UnorderedMap hash list"),
        src.m_hash_lists.extent(0));
    tmp.m_next_index = size_type_view(
        view_alloc(WithoutInitializing, "UnorderedMap next index"),
        src.m_next_index.extent(0));
    tmp.m_keys =
        key_type_view(view_alloc(WithoutInitializing, "UnorderedMap keys"),
                      src.m_keys.extent(0));
    tmp.m_values =
        value_type_view(view_alloc(WithoutInitializing, "UnorderedMap values"),
                        src.m_values.extent(0));
    tmp.m_scalars = scalars_view("UnorderedMap scalars");

    *this = tmp;
  }
  template <typename SKey, typename SValue, typename SDevice>
  std::enable_if_t<std::is_same<std::remove_const_t<SKey>, key_type>::value &&
                   std::is_same<std::remove_const_t<SValue>, value_type>::value>
  deep_copy_view(
      UnorderedMap<SKey, SValue, SDevice, Hasher, EqualTo> const &src) {
#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_4
    // To deep copy an UnorderedMap, the capacities must be identical.
    KOKKOS_EXPECTS(capacity() == src.capacity());
#else
    if (capacity() != src.capacity()) {
      allocate_view(src);
#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
      Kokkos::Impl::log_warning(
          "Warning: deep_copy_view() allocating views is deprecated. Must call "
          "with UnorderedMaps of identical capacity, or use "
          "create_copy_view().\n");
#endif
    }
#endif

    if (m_hash_lists.data() != src.m_hash_lists.data()) {
      Kokkos::deep_copy(m_available_indexes, src.m_available_indexes);

      using raw_deep_copy =
          Kokkos::Impl::DeepCopy<typename device_type::memory_space,
                                 typename SDevice::memory_space>;

      raw_deep_copy(m_hash_lists.data(), src.m_hash_lists.data(),
                    sizeof(size_type) * src.m_hash_lists.extent(0));
      raw_deep_copy(m_next_index.data(), src.m_next_index.data(),
                    sizeof(size_type) * src.m_next_index.extent(0));
      raw_deep_copy(m_keys.data(), src.m_keys.data(),
                    sizeof(key_type) * src.m_keys.extent(0));
      if (!is_set) {
        raw_deep_copy(m_values.data(), src.m_values.data(),
                      sizeof(impl_value_type) * src.m_values.extent(0));
      }
      raw_deep_copy(m_scalars.data(), src.m_scalars.data(),
                    sizeof(int) * num_scalars);

      Kokkos::fence(
          "Kokkos::UnorderedMap::deep_copy_view: fence after copy to dst.");
    }
  }
  bool modified() const { return get_flag(modified_idx); }
  void set_flag(int flag) const {
    using raw_deep_copy =
        Kokkos::Impl::DeepCopy<typename device_type::memory_space,
                               Kokkos::HostSpace>;
    const int true_ = true;
    raw_deep_copy(m_scalars.data() + flag, &true_, sizeof(int));
    Kokkos::fence(
        "Kokkos::UnorderedMap::set_flag: fence after copying flag from "
        "HostSpace");
  }

  void reset_flag(int flag) const {
    using raw_deep_copy =
        Kokkos::Impl::DeepCopy<typename device_type::memory_space,
                               Kokkos::HostSpace>;
    const int false_ = false;
    raw_deep_copy(m_scalars.data() + flag, &false_, sizeof(int));
    Kokkos::fence(
        "Kokkos::UnorderedMap::reset_flag: fence after copying flag from "
        "HostSpace");
  }

  bool get_flag(int flag) const {
    using raw_deep_copy =
        Kokkos::Impl::DeepCopy<Kokkos::HostSpace,
                               typename device_type::memory_space>;
    int result = false;
    raw_deep_copy(&result, m_scalars.data() + flag, sizeof(int));
    Kokkos::fence(
        "Kokkos::UnorderedMap::get_flag: fence after copy to return value in "
        "HostSpace");
    return result;
  }
  static uint32_t calculate_capacity(uint32_t capacity_hint) {
    // Increase by roughly 16% and round up to the nearest multiple of 128.
    return capacity_hint
               ? ((static_cast<uint32_t>(7ull * capacity_hint / 6u) + 127u) /
                  128u) *
                     128u
               : 128u;
  }
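  // Worked example of the rounding above (illustrative numbers): a
  // capacity_hint of 1000 grows to 7 * 1000 / 6 = 1166, then
  // (1166 + 127) / 128 = 10 and 10 * 128 = 1280 entries.  A hint of 0 yields
  // the default capacity of 128.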
  bool m_bounded_insert;
  hasher_type m_hasher;
  equal_to_type m_equal_to;
  using shared_size_t = View<size_type, Kokkos::DefaultHostExecutionSpace>;
  shared_size_t m_size;
  bitset_type m_available_indexes;
  size_type_view m_hash_lists;
  size_type_view m_next_index;
  key_type_view m_keys;
  value_type_view m_values;
  scalars_view m_scalars;
  template <typename KKey, typename VValue, typename DDevice, typename HHash,
            typename EEqualTo>
  friend class UnorderedMap;

  template <typename UMap>
  friend struct Impl::UnorderedMapErase;

  template <typename UMap>
  friend struct Impl::UnorderedMapHistogram;

  template <typename UMap>
  friend struct Impl::UnorderedMapPrint;
};
// Specialization of deep_copy() for two UnorderedMap objects.
template <typename DKey, typename DT, typename DDevice, typename SKey,
          typename ST, typename SDevice, typename Hasher, typename EqualTo>
inline void deep_copy(
    UnorderedMap<DKey, DT, DDevice, Hasher, EqualTo> &dst,
    const UnorderedMap<SKey, ST, SDevice, Hasher, EqualTo> &src) {
  dst.deep_copy_view(src);
}
// Specialization of create_mirror() for an UnorderedMap object.
template <typename Key, typename ValueType, typename Device, typename Hasher,
          typename EqualTo>
typename UnorderedMap<Key, ValueType, Device, Hasher, EqualTo>::HostMirror
create_mirror(
    const UnorderedMap<Key, ValueType, Device, Hasher, EqualTo> &src) {
  typename UnorderedMap<Key, ValueType, Device, Hasher, EqualTo>::HostMirror
      dst;
  dst.allocate_view(src);
  return dst;
}
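// Example (a hedged sketch): copying a device-resident map to the host for
// inspection; "device_map" is illustrative.
//
//   auto host_map = Kokkos::create_mirror(device_map);
//   Kokkos::deep_copy(host_map, device_map);
//   for (uint32_t i = 0; i < host_map.capacity(); ++i) {
//     if (host_map.valid_at(i))
//       std::cout << host_map.key_at(i) << " -> " << host_map.value_at(i)
//                 << '\n';
//   }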
}  // namespace Kokkos

#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_UNORDEREDMAP
#undef KOKKOS_IMPL_PUBLIC_INCLUDE
#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_UNORDEREDMAP
#endif

#endif  // KOKKOS_UNORDERED_MAP_HPP