Kokkos Core Kernels Package  Version of the Day
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Groups Pages
Kokkos_UnorderedMap.hpp
Go to the documentation of this file.
1 //@HEADER
2 // ************************************************************************
3 //
4 // Kokkos v. 4.0
5 // Copyright (2022) National Technology & Engineering
6 // Solutions of Sandia, LLC (NTESS).
7 //
8 // Under the terms of Contract DE-NA0003525 with NTESS,
9 // the U.S. Government retains certain rights in this software.
10 //
11 // Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
12 // See https://kokkos.org/LICENSE for license information.
13 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
14 //
15 //@HEADER
16 
22 
23 #ifndef KOKKOS_UNORDERED_MAP_HPP
24 #define KOKKOS_UNORDERED_MAP_HPP
25 #ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
26 #define KOKKOS_IMPL_PUBLIC_INCLUDE
27 #define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_UNORDEREDMAP
28 #endif
29 
30 #include <Kokkos_Core.hpp>
31 #include <Kokkos_Functional.hpp>
32 
33 #include <Kokkos_Bitset.hpp>
34 
35 #include <impl/Kokkos_Traits.hpp>
36 #include <impl/Kokkos_UnorderedMap_impl.hpp>
37 #include <impl/Kokkos_ViewCtor.hpp>
38 
39 #include <cstdint>
40 
41 namespace Kokkos {
42 
// Sentinel index (all bits set) used to mean "no entry": returned by
// failed inserts/finds and used as the list terminator in m_next_index.
43 enum : unsigned { UnorderedMapInvalidIndex = ~0u };
44 
58 
60  private:
61  enum Status : uint32_t {
62  SUCCESS = 1u << 31,
63  EXISTING = 1u << 30,
64  FREED_EXISTING = 1u << 29,
65  LIST_LENGTH_MASK = ~(SUCCESS | EXISTING | FREED_EXISTING)
66  };
67 
68  public:
70  KOKKOS_FORCEINLINE_FUNCTION
71  bool success() const { return (m_status & SUCCESS); }
72 
74  KOKKOS_FORCEINLINE_FUNCTION
75  bool existing() const { return (m_status & EXISTING); }
76 
78  KOKKOS_FORCEINLINE_FUNCTION
79  bool failed() const { return m_index == UnorderedMapInvalidIndex; }
80 
83  KOKKOS_FORCEINLINE_FUNCTION
84  bool freed_existing() const { return (m_status & FREED_EXISTING); }
85 
88  KOKKOS_FORCEINLINE_FUNCTION
89  uint32_t list_position() const { return (m_status & LIST_LENGTH_MASK); }
90 
92  KOKKOS_FORCEINLINE_FUNCTION
93  uint32_t index() const { return m_index; }
94 
95  KOKKOS_FORCEINLINE_FUNCTION
96  UnorderedMapInsertResult() : m_index(UnorderedMapInvalidIndex), m_status(0) {}
97 
98  KOKKOS_FORCEINLINE_FUNCTION
99  void increment_list_position() {
100  m_status += (list_position() < LIST_LENGTH_MASK) ? 1u : 0u;
101  }
102 
103  KOKKOS_FORCEINLINE_FUNCTION
104  void set_existing(uint32_t i, bool arg_freed_existing) {
105  m_index = i;
106  m_status =
107  EXISTING | (arg_freed_existing ? FREED_EXISTING : 0u) | list_position();
108  }
109 
110  KOKKOS_FORCEINLINE_FUNCTION
111  void set_success(uint32_t i) {
112  m_index = i;
113  m_status = SUCCESS | list_position();
114  }
115 
116  private:
117  uint32_t m_index;
118  uint32_t m_status;
119 };
120 
// Operations applied to the values array upon subsequent insertions of an
// already-present key: NoOp discards the incoming value; AtomicAdd
// atomically accumulates it into the stored value.
// NOTE(review): the enclosing 'struct ... {' declaration line (original
// line 136) appears to be missing from this extraction — confirm against
// the upstream header.
135 template <class ValueTypeView, class ValuesIdxType>
 137  using value_type = typename ValueTypeView::non_const_value_type;
 138  struct NoOp {
 139  KOKKOS_FUNCTION
 // Leaves the stored value untouched.
 140  void op(ValueTypeView, ValuesIdxType, const value_type) const {}
 141  };
 142  struct AtomicAdd {
 143  KOKKOS_FUNCTION
 // Atomically adds v to the value stored at values_idx.
 144  void op(ValueTypeView values, ValuesIdxType values_idx,
 145  const value_type v) const {
 146  Kokkos::atomic_add(values.data() + values_idx, v);
 147  }
 148  };
 149 };
150 
// Thread-safe, performance-portable lookup table keyed by Key with mapped
// type Value (Value = void makes it a set, see is_set below).
// NOTE(review): this extraction is missing several original lines — the
// 'class UnorderedMap' declaration (original line 210) and the right-hand
// sides of the HostMirror, key_type_view, value_type_view, size_type_view,
// bitset_type and default_op_type aliases; confirm against upstream.
206 template <typename Key, typename Value,
 207  typename Device = Kokkos::DefaultExecutionSpace,
 208  typename Hasher = pod_hash<std::remove_const_t<Key>>,
 209  typename EqualTo = pod_equal_to<std::remove_const_t<Key>>>
 211  private:
 212  using host_mirror_space =
 213  typename ViewTraits<Key, Device, void, void>::host_mirror_space;
 214 
 215  public:
 217 
 218  // key_types
 219  using declared_key_type = Key;
 220  using key_type = std::remove_const_t<declared_key_type>;
 221  using const_key_type = std::add_const_t<key_type>;
 222 
 223  // value_types
 224  using declared_value_type = Value;
 225  using value_type = std::remove_const_t<declared_value_type>;
 226  using const_value_type = std::add_const_t<value_type>;
 227 
 228  using device_type = Device;
 229  using execution_space = typename Device::execution_space;
 230  using hasher_type = Hasher;
 231  using equal_to_type = EqualTo;
 232  using size_type = uint32_t;
 233 
 234  // map_types: the four const/non-const variants of this map type.
 235  using declared_map_type =
 236  UnorderedMap<declared_key_type, declared_value_type, device_type,
 237  hasher_type, equal_to_type>;
 238  using insertable_map_type = UnorderedMap<key_type, value_type, device_type,
 239  hasher_type, equal_to_type>;
 240  using modifiable_map_type =
 241  UnorderedMap<const_key_type, value_type, device_type, hasher_type,
 242  equal_to_type>;
 243  using const_map_type = UnorderedMap<const_key_type, const_value_type,
 244  device_type, hasher_type, equal_to_type>;
 245 
 // A void value type means this map behaves as a set.
 246  static constexpr bool is_set = std::is_void_v<value_type>;
 247  static constexpr bool has_const_key =
 248  std::is_same_v<const_key_type, declared_key_type>;
 249  static constexpr bool has_const_value =
 250  is_set || std::is_same_v<const_value_type, declared_value_type>;
 251 
 // Capability flags derived from the const-ness of Key/Value.
 252  static constexpr bool is_insertable_map =
 253  !has_const_key && (is_set || !has_const_value);
 254  static constexpr bool is_modifiable_map = has_const_key && !has_const_value;
 255  static constexpr bool is_const_map = has_const_key && has_const_value;
 256 
 258 
 // NOTE(review): alias right-hand side missing from this extraction.
 259  using HostMirror =
 261 
 262  using histogram_type = Impl::UnorderedMapHistogram<const_map_type>;
 264 
 265  private:
 // All-bits-set sentinel; terminates hash-bucket chains in m_next_index.
 266  enum : size_type { invalid_index = ~static_cast<size_type>(0) };
 267 
 // Sets store a dummy int so the value view machinery still instantiates.
 268  using impl_value_type = std::conditional_t<is_set, int, declared_value_type>;
 269 
 // NOTE(review): the false-branches of the following conditional aliases
 // are missing from this extraction (presumably const views).
 270  using key_type_view = std::conditional_t<
 271  is_insertable_map, View<key_type *, device_type>,
 273 
 274  using value_type_view = std::conditional_t<
 275  is_insertable_map || is_modifiable_map,
 278 
 279  using size_type_view = std::conditional_t<
 280  is_insertable_map, View<size_type *, device_type>,
 282 
 283  using bitset_type = std::conditional_t<is_insertable_map, Bitset<Device>,
 285 
 // Indices into the m_scalars flag array.
 286  enum { modified_idx = 0, erasable_idx = 1, failed_insert_idx = 2 };
 287  enum { num_scalars = 3 };
 289 
 290  public:
 292 
 // NOTE(review): alias right-hand side missing from this extraction.
 293  using default_op_type =
 295 
 // Constructor: delegates to the ViewCtorProp overload with default
 // allocation properties.
 304  UnorderedMap(size_type capacity_hint = 0, hasher_type hasher = hasher_type(),
 305  equal_to_type equal_to = equal_to_type())
 306  : UnorderedMap(Kokkos::view_alloc(), capacity_hint, hasher, equal_to) {}
307 
 // Constructor taking View-constructor allocation properties (label,
 // execution space, ...).  Allocates the bitset, hash lists, next-index
 // list, keys and values views, then initializes the two index views to
 // invalid_index.  Throws for non-insertable (const-key) maps.
 308  template <class... P>
 309  UnorderedMap(const Impl::ViewCtorProp<P...> &arg_prop,
 310  size_type capacity_hint = 0, hasher_type hasher = hasher_type(),
 311  equal_to_type equal_to = equal_to_type())
 312  : m_bounded_insert(true), m_hasher(hasher), m_equal_to(equal_to) {
 313  if (!is_insertable_map) {
 314  Kokkos::Impl::throw_runtime_exception(
 315  "Cannot construct a non-insertable (i.e. const key_type) "
 316  "unordered_map");
 317  }
 318 
 // Sanity-check the allocation properties at compile time.
 320  using alloc_prop_t = std::decay_t<decltype(arg_prop)>;
 321  static_assert(alloc_prop_t::initialize,
 322  "Allocation property 'initialize' should be true.");
 323  static_assert(
 324  !alloc_prop_t::has_pointer,
 325  "Allocation properties should not contain the 'pointer' property.");
 326 
 // Ensure a label exists; prop_copy_noinit skips zero-initialization for
 // views that are fully overwritten below.
 329  const auto prop_copy =
 330  Impl::with_properties_if_unset(arg_prop, std::string("UnorderedMap"));
 331  const auto prop_copy_noinit =
 332  Impl::with_properties_if_unset(prop_copy, Kokkos::WithoutInitializing);
 333 
 // m_size lives in host memory (see shared_size_t).
 335  m_size = shared_size_t(Kokkos::view_alloc(
 336  Kokkos::DefaultHostExecutionSpace{},
 337  Impl::get_property<Impl::LabelTag>(prop_copy) + " - size"));
 338 
 // Actual capacity is the rounded-up value from calculate_capacity.
 339  m_available_indexes =
 340  bitset_type(Kokkos::Impl::append_to_label(prop_copy, " - bitset"),
 341  calculate_capacity(capacity_hint));
 342 
 343  m_hash_lists = size_type_view(
 344  Kokkos::Impl::append_to_label(prop_copy_noinit, " - hash list"),
 345  Impl::find_hash_size(capacity()));
 346 
 347  m_next_index = size_type_view(
 348  Kokkos::Impl::append_to_label(prop_copy_noinit, " - next index"),
 349  capacity() + 1); // +1 so that the *_at functions can always return a
 350  // valid reference
 351 
 352  m_keys = key_type_view(Kokkos::Impl::append_to_label(prop_copy, " - keys"),
 353  capacity());
 354 
 // Sets allocate an empty values view.
 355  m_values =
 356  value_type_view(Kokkos::Impl::append_to_label(prop_copy, " - values"),
 357  is_set ? 0 : capacity());
 358 
 359  m_scalars =
 360  scalars_view(Kokkos::Impl::append_to_label(prop_copy, " - scalars"));
 361 
 // Use the caller-supplied execution space for initialization when one
 // was provided in the allocation properties.
 368  if constexpr (alloc_prop_t::has_execution_space) {
 369  const auto &space = Impl::get_property<Impl::ExecutionSpaceTag>(arg_prop);
 370  Kokkos::deep_copy(space, m_hash_lists, invalid_index);
 371  Kokkos::deep_copy(space, m_next_index, invalid_index);
 372  } else {
 373  Kokkos::deep_copy(m_hash_lists, invalid_index);
 374  Kokkos::deep_copy(m_next_index, invalid_index);
 375  }
 376  }
377 
 // Clear the sticky failed-insert flag so later failed_insert() queries
 // report only new failures.
 378  void reset_failed_insert_flag() { reset_flag(failed_insert_idx); }
 379 
 380  histogram_type get_histogram() { return histogram_type(*this); }
 381 
 // Clear all entries in the table and re-enable bounded insertion.
 383  void clear() {
 384  m_bounded_insert = true;
 385 
 386  if (capacity() == 0) return;
 387 
 388  m_available_indexes.clear();
 389 
 390  Kokkos::deep_copy(m_hash_lists, invalid_index);
 391  Kokkos::deep_copy(m_next_index, invalid_index);
 392  {
 // Overwrite keys with a default-constructed key.
 393  const key_type tmp = key_type();
 394  Kokkos::deep_copy(m_keys, tmp);
 395  }
 396  Kokkos::deep_copy(m_scalars, 0);
 397  m_size() = 0;
 398  }
 399 
 // True when every backing view this map needs has been allocated
 // (sets skip the values view).
 400  KOKKOS_INLINE_FUNCTION constexpr bool is_allocated() const {
 401  return (m_keys.is_allocated() && (is_set || m_values.is_allocated()) &&
 402  m_scalars.is_allocated());
 403  }
404 
 // Change the capacity of the map; bounded insertion is kept only for an
 // empty map.  Never shrinks below the current number of entries.
 415  bool rehash(size_type requested_capacity = 0) {
 416  const bool bounded_insert = (capacity() == 0) || (size() == 0u);
 417  return rehash(requested_capacity, bounded_insert);
 418  }
 419 
 // Rehash into a freshly constructed map and assign it to *this.
 // Returns false for non-insertable (const-key) maps.
 420  bool rehash(size_type requested_capacity, bool bounded_insert) {
 421  if (!is_insertable_map) return false;
 422 
 423  const size_type curr_size = size();
 // Never request less capacity than the current contents.
 424  requested_capacity =
 425  (requested_capacity < curr_size) ? curr_size : requested_capacity;
 426 
 427  insertable_map_type tmp(requested_capacity, m_hasher, m_equal_to);
 428 
 429  if (curr_size) {
 // Unbounded insertion while re-inserting existing entries.
 430  tmp.m_bounded_insert = false;
 431  Impl::UnorderedMapRehash<insertable_map_type> f(tmp, *this);
 432  f.apply();
 433  }
 434  tmp.m_bounded_insert = bounded_insert;
 435 
 436  *this = tmp;
 437 
 438  return true;
 439  }
 440 
 // Number of entries; recounts the occupancy bitset (and caches the
 // result in m_size) only when the map was modified since the last call.
 448  size_type size() const {
 449  if (capacity() == 0u) return 0u;
 450  if (modified()) {
 451  m_size() = m_available_indexes.count();
 452  reset_flag(modified_idx);
 453  }
 454  return m_size();
 455  }
456 
 // Has any insert() failed since construction/clear/reset of the flag?
 462  bool failed_insert() const { return get_flag(failed_insert_idx); }
 463 
 // Is the map currently in the erase phase (between begin_erase and
 // end_erase)?  Always false for non-insertable maps.
 464  bool erasable() const {
 465  return is_insertable_map ? get_flag(erasable_idx) : false;
 466  }
 467 
 // Enter the erase phase; returns true if the flag transitioned.
 // insert() is disabled while erasable is set.
 468  bool begin_erase() {
 469  bool result = !erasable();
 470  if (is_insertable_map && result) {
 471  execution_space().fence(
 472  "Kokkos::UnorderedMap::begin_erase: fence before setting erasable "
 473  "flag");
 474  set_flag(erasable_idx);
 475  }
 476  return result;
 477  }
 478 
 // Leave the erase phase: physically remove entries whose bitset bit was
 // cleared by erase(), then reset the flag.  Returns true if the flag
 // transitioned.
 479  bool end_erase() {
 480  bool result = erasable();
 481  if (is_insertable_map && result) {
 482  execution_space().fence(
 483  "Kokkos::UnorderedMap::end_erase: fence before erasing");
 484  Impl::UnorderedMapErase<declared_map_type> f(*this);
 485  f.apply();
 486  execution_space().fence(
 487  "Kokkos::UnorderedMap::end_erase: fence after erasing");
 488  reset_flag(erasable_idx);
 489  }
 490  return result;
 491  }
492 
 // Maximum number of entries the table can hold (size of the bitset).
 497  KOKKOS_FORCEINLINE_FUNCTION
 498  size_type capacity() const { return m_available_indexes.size(); }
 499 
 // Number of hash-table buckets (length of m_hash_lists).
 510  KOKKOS_INLINE_FUNCTION
 511  size_type hash_capacity() const { return m_hash_lists.extent(0); }
 512 
513  //---------------------------------------------------------------------------
514  //---------------------------------------------------------------------------
515 
 // Attempt to insert (k, v).  Device-callable and safe for concurrent
 // callers: walks the bucket's linked list looking for k; if absent,
 // claims a free slot from m_available_indexes, writes key/value, and
 // links the slot in with an atomic compare-exchange (retrying the walk
 // on CAS failure).  Returns an insert_result describing the outcome.
 // Inserts are rejected while the map is in the erase phase.
 527  template <typename InsertOpType = default_op_type>
 528  KOKKOS_INLINE_FUNCTION insert_result
 529  insert(key_type const &k, impl_value_type const &v = impl_value_type(),
 530  [[maybe_unused]] InsertOpType arg_insert_op = InsertOpType()) const {
 531  if constexpr (is_set) {
 532  static_assert(std::is_same_v<InsertOpType, default_op_type>,
 533  "Insert Operations are not supported on sets.");
 534  }
 535 
 536  insert_result result;
 537 
 // Default-constructed result reports failure.
 538  if (!is_insertable_map || capacity() == 0u ||
 539  m_scalars((int)erasable_idx)) {
 540  return result;
 541  }
 542 
 // Mark the map modified so size() recounts on next query.
 543  if (!m_scalars((int)modified_idx)) {
 544  m_scalars((int)modified_idx) = true;
 545  }
 546 
 547  int volatile &failed_insert_ref = m_scalars((int)failed_insert_idx);
 548 
 549  const size_type hash_value = m_hasher(k);
 550  const size_type hash_list = hash_value % m_hash_lists.extent(0);
 551 
 552  size_type *curr_ptr = &m_hash_lists[hash_list];
 553  size_type new_index = invalid_index;
 554 
 // Force integer multiply to long
 555  // Force integer multiply to long
 556  size_type index_hint = static_cast<size_type>(
 557  (static_cast<double>(hash_list) * capacity()) / m_hash_lists.extent(0));
 558 
 559  size_type find_attempts = 0;
 560 
 // Bounded insertion caps the free-slot search to limit worst-case work.
 561  enum : unsigned { bounded_find_attempts = 32u };
 562  const size_type max_attempts =
 563  (m_bounded_insert &&
 564  (bounded_find_attempts < m_available_indexes.max_hint()))
 565  ? bounded_find_attempts
 566  : m_available_indexes.max_hint();
 567 
 568  bool not_done = true;
 569 
 570 #if defined(__MIC__)
 571 #pragma noprefetch
 572 #endif
 573  while (not_done) {
 574  // Continue searching the unordered list for this key,
 575  // list will only be appended during insert phase.
 576  // Need volatile_load as other threads may be appending.
 577 
 578  // FIXME_SYCL replacement for memory_fence
 579 #ifdef KOKKOS_ENABLE_SYCL
 580  size_type curr = Kokkos::atomic_load(curr_ptr);
 581 #else
 582  size_type curr = volatile_load(curr_ptr);
 583 #endif
 584 
 585  KOKKOS_NONTEMPORAL_PREFETCH_LOAD(
 586  &m_keys[curr != invalid_index ? curr : 0]);
 587 #if defined(__MIC__)
 588 #pragma noprefetch
 589 #endif
 // Walk the chain until the key is found or the list ends.
 590  while (curr != invalid_index && !m_equal_to(
 591 #ifdef KOKKOS_ENABLE_SYCL
 592  Kokkos::atomic_load(&m_keys[curr])
 593 #else
 594  volatile_load(&m_keys[curr])
 595 #endif
 596  ,
 597  k)) {
 598  result.increment_list_position();
 599  index_hint = curr;
 600  curr_ptr = &m_next_index[curr];
 601 #ifdef KOKKOS_ENABLE_SYCL
 602  curr = Kokkos::atomic_load(curr_ptr);
 603 #else
 604  curr = volatile_load(curr_ptr);
 605 #endif
 606  KOKKOS_NONTEMPORAL_PREFETCH_LOAD(
 607  &m_keys[curr != invalid_index ? curr : 0]);
 608  }
 609 
 610  //------------------------------------------------------------
 611  // If key already present then return that index.
 612  if (curr != invalid_index) {
 613  const bool free_existing = new_index != invalid_index;
 614  if (free_existing) {
 615  // Previously claimed an unused entry that was not inserted.
 616  // Release this unused entry immediately.
 617  if (!m_available_indexes.reset(new_index)) {
 618  Kokkos::printf("Unable to free existing\n");
 619  }
 620  }
 621 
 622  result.set_existing(curr, free_existing);
 623  if constexpr (!is_set) {
 // Apply the user-supplied op (e.g. AtomicAdd) to the stored value.
 624  arg_insert_op.op(m_values, curr, v);
 625  }
 626  not_done = false;
 627  }
 628  //------------------------------------------------------------
 629  // Key is not currently in the map.
 630  // If the thread has claimed an entry try to insert now.
 631  else {
 632  //------------------------------------------------------------
 633  // If have not already claimed an unused entry then do so now.
 634  if (new_index == invalid_index) {
 635  bool found = false;
 636  // use the hash_list as the flag for the search direction
 637  Kokkos::tie(found, index_hint) =
 638  m_available_indexes.find_any_unset_near(index_hint, hash_list);
 639 
 640  // found and index and this thread set it
 641  if (!found && ++find_attempts >= max_attempts) {
 642  failed_insert_ref = true;
 643  not_done = false;
 644  } else if (m_available_indexes.set(index_hint)) {
 645  new_index = index_hint;
 646  // Set key and value
 647  KOKKOS_NONTEMPORAL_PREFETCH_STORE(&m_keys[new_index]);
 648 // FIXME_SYCL replacement for memory_fence
 649 #ifdef KOKKOS_ENABLE_SYCL
 650  Kokkos::atomic_store(&m_keys[new_index], k);
 651 #else
 652  m_keys[new_index] = k;
 653 #endif
 654 
 655  if (!is_set) {
 656  KOKKOS_NONTEMPORAL_PREFETCH_STORE(&m_values[new_index]);
 657 #ifdef KOKKOS_ENABLE_SYCL
 658  Kokkos::atomic_store(&m_values[new_index], v);
 659 #else
 660  m_values[new_index] = v;
 661 #endif
 662  }
 663 
 664 #ifndef KOKKOS_ENABLE_SYCL
 665  // Do not proceed until key and value are updated in global memory
 666  memory_fence();
 667 #endif
 668  }
 669  } else if (failed_insert_ref) {
 // Another thread reported exhaustion; give up.
 670  not_done = false;
 671  }
 672 
 673  // Attempt to append claimed entry into the list.
 674  // Another thread may also be trying to append the same list so protect
 675  // with atomic.
 676  if (new_index != invalid_index &&
 677  curr == atomic_compare_exchange(
 678  curr_ptr, static_cast<size_type>(invalid_index),
 679  new_index)) {
 680  // Succeeded in appending
 681  result.set_success(new_index);
 682  not_done = false;
 683  }
 684  }
 685  } // while ( not_done )
 686 
 687  return result;
 688  }
689 
 // Mark the entry for key k as erased (clears its bitset bit).  Only
 // allowed between begin_erase() and end_erase(); physical removal
 // happens in end_erase().  Returns true if the key was found.
 690  KOKKOS_INLINE_FUNCTION
 691  bool erase(key_type const &k) const {
 692  bool result = false;
 693 
 694  if (is_insertable_map && 0u < capacity() && m_scalars((int)erasable_idx)) {
 695  if (!m_scalars((int)modified_idx)) {
 696  m_scalars((int)modified_idx) = true;
 697  }
 698 
 699  size_type index = find(k);
 700  if (valid_at(index)) {
 701  m_available_indexes.reset(index);
 702  result = true;
 703  }
 704  }
 705 
 706  return result;
 707  }
 708 
 // Find key k by walking its hash bucket's chain; returns the entry
 // index, or invalid_index when absent.
 716  KOKKOS_INLINE_FUNCTION
 717  size_type find(const key_type &k) const {
 718  size_type curr = 0u < capacity()
 719  ? m_hash_lists(m_hasher(k) % m_hash_lists.extent(0))
 720  : invalid_index;
 721 
 722  KOKKOS_NONTEMPORAL_PREFETCH_LOAD(&m_keys[curr != invalid_index ? curr : 0]);
 723  while (curr != invalid_index && !m_equal_to(m_keys[curr], k)) {
 724  KOKKOS_NONTEMPORAL_PREFETCH_LOAD(
 725  &m_keys[curr != invalid_index ? curr : 0]);
 726  curr = m_next_index[curr];
 727  }
 728 
 729  return curr;
 730  }
 731 
 // Does the key exist in the map?
 736  KOKKOS_INLINE_FUNCTION
 737  bool exists(const key_type &k) const { return valid_at(find(k)); }
 738 
 // Value at direct index i (maps only; SFINAE-disabled for sets).
 // Returns by value for const-value maps, by reference otherwise.
 747  template <typename Dummy = value_type>
 748  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
 749  !std::is_void<Dummy>::value, // !is_set
 750  std::conditional_t<has_const_value, impl_value_type, impl_value_type &>>
 751  value_at(size_type i) const {
 752  KOKKOS_EXPECTS(i < capacity());
 753  return m_values[i];
 754  }
 755 
 // Key at direct index i; precondition i < capacity().
 762  KOKKOS_FORCEINLINE_FUNCTION
 763  key_type key_at(size_type i) const {
 764  KOKKOS_EXPECTS(i < capacity());
 765  return m_keys[i];
 766  }
 767 
 // Is slot i currently occupied (its bitset bit set)?
 768  KOKKOS_FORCEINLINE_FUNCTION
 769  bool valid_at(size_type i) const { return m_available_indexes.test(i); }
770 
 // Shallow-copy converting constructor: enabled only for compatible
 // const-qualification combinations (same Device/Hasher/EqualTo).  All
 // views alias the source's allocations.
 771  template <typename SKey, typename SValue>
 772  UnorderedMap(
 773  UnorderedMap<SKey, SValue, Device, Hasher, EqualTo> const &src,
 774  std::enable_if_t<
 775  Impl::UnorderedMapCanAssign<declared_key_type, declared_value_type,
 776  SKey, SValue>::value,
 777  int> = 0)
 778  : m_bounded_insert(src.m_bounded_insert),
 779  m_hasher(src.m_hasher),
 780  m_equal_to(src.m_equal_to),
 781  m_size(src.m_size),
 782  m_available_indexes(src.m_available_indexes),
 783  m_hash_lists(src.m_hash_lists),
 784  m_next_index(src.m_next_index),
 785  m_keys(src.m_keys),
 786  m_values(src.m_values),
 787  m_scalars(src.m_scalars) {}
 788 
 // Shallow-copy assignment with the same compatibility constraint.
 789  template <typename SKey, typename SValue>
 790  std::enable_if_t<
 791  Impl::UnorderedMapCanAssign<declared_key_type, declared_value_type, SKey,
 792  SValue>::value,
 793  declared_map_type &>
 794  operator=(UnorderedMap<SKey, SValue, Device, Hasher, EqualTo> const &src) {
 795  m_bounded_insert = src.m_bounded_insert;
 796  m_hasher = src.m_hasher;
 797  m_equal_to = src.m_equal_to;
 798  m_size = src.m_size;
 799  m_available_indexes = src.m_available_indexes;
 800  m_hash_lists = src.m_hash_lists;
 801  m_next_index = src.m_next_index;
 802  m_keys = src.m_keys;
 803  m_values = src.m_values;
 804  m_scalars = src.m_scalars;
 805  return *this;
 806  }
 807 
 // Deep-copy the contents of src (possibly on a different device) into
 // *this via freshly allocated views; used by Kokkos::deep_copy below.
 // No-op when *this already aliases src.
 808  template <typename SKey, typename SValue, typename SDevice>
 809  std::enable_if_t<std::is_same<std::remove_const_t<SKey>, key_type>::value &&
 810  std::is_same<std::remove_const_t<SValue>, value_type>::value>
 811  create_copy_view(
 812  UnorderedMap<SKey, SValue, SDevice, Hasher, EqualTo> const &src) {
 813  if (m_hash_lists.data() != src.m_hash_lists.data()) {
 814  insertable_map_type tmp;
 815 
 816  tmp.m_bounded_insert = src.m_bounded_insert;
 817  tmp.m_hasher = src.m_hasher;
 818  tmp.m_equal_to = src.m_equal_to;
 819  tmp.m_size() = src.m_size();
 820  tmp.m_available_indexes = bitset_type(src.capacity());
 821  tmp.m_hash_lists = size_type_view(
 822  view_alloc(WithoutInitializing, "UnorderedMap hash list"),
 823  src.m_hash_lists.extent(0));
 824  tmp.m_next_index = size_type_view(
 825  view_alloc(WithoutInitializing, "UnorderedMap next index"),
 826  src.m_next_index.extent(0));
 827  tmp.m_keys =
 828  key_type_view(view_alloc(WithoutInitializing, "UnorderedMap keys"),
 829  src.m_keys.extent(0));
 830  tmp.m_values = value_type_view(
 831  view_alloc(WithoutInitializing, "UnorderedMap values"),
 832  src.m_values.extent(0));
 833  tmp.m_scalars = scalars_view("UnorderedMap scalars");
 834 
 835  Kokkos::deep_copy(tmp.m_available_indexes, src.m_available_indexes);
 836 
 // Raw byte-wise copies between the two memory spaces.
 837  using raw_deep_copy =
 838  Kokkos::Impl::DeepCopy<typename device_type::memory_space,
 839  typename SDevice::memory_space>;
 840 
 841  raw_deep_copy(tmp.m_hash_lists.data(), src.m_hash_lists.data(),
 842  sizeof(size_type) * src.m_hash_lists.extent(0));
 843  raw_deep_copy(tmp.m_next_index.data(), src.m_next_index.data(),
 844  sizeof(size_type) * src.m_next_index.extent(0));
 845  raw_deep_copy(tmp.m_keys.data(), src.m_keys.data(),
 846  sizeof(key_type) * src.m_keys.extent(0));
 847  if (!is_set) {
 848  raw_deep_copy(tmp.m_values.data(), src.m_values.data(),
 849  sizeof(impl_value_type) * src.m_values.extent(0));
 850  }
 851  raw_deep_copy(tmp.m_scalars.data(), src.m_scalars.data(),
 852  sizeof(int) * num_scalars);
 853 
 854  Kokkos::fence(
 855  "Kokkos::UnorderedMap::create_copy_view: fence after copy to tmp");
 856 
 857  *this = tmp;
 858  }
 859  }
860 
 // Host-side helpers that read/write the device-resident m_scalars flags
 // via raw deep copies, fencing after each transfer.
 // NOTE(review): the DeepCopy destination/source second template argument
 // lines in set_flag/reset_flag (original lines 868/879) are missing from
 // this extraction — presumably Kokkos::HostSpace by symmetry with
 // get_flag; confirm against the upstream header.
 862  private: // private member functions
 863  bool modified() const { return get_flag(modified_idx); }
 864 
 865  void set_flag(int flag) const {
 866  using raw_deep_copy =
 867  Kokkos::Impl::DeepCopy<typename device_type::memory_space,
 869  const int true_ = true;
 870  raw_deep_copy(m_scalars.data() + flag, &true_, sizeof(int));
 871  Kokkos::fence(
 872  "Kokkos::UnorderedMap::set_flag: fence after copying flag from "
 873  "HostSpace");
 874  }
 875 
 876  void reset_flag(int flag) const {
 877  using raw_deep_copy =
 878  Kokkos::Impl::DeepCopy<typename device_type::memory_space,
 880  const int false_ = false;
 881  raw_deep_copy(m_scalars.data() + flag, &false_, sizeof(int));
 882  Kokkos::fence(
 883  "Kokkos::UnorderedMap::reset_flag: fence after copying flag from "
 884  "HostSpace");
 885  }
 886 
 // Read a flag from device memory into a host int and return it.
 887  bool get_flag(int flag) const {
 888  using raw_deep_copy =
 889  Kokkos::Impl::DeepCopy<Kokkos::HostSpace,
 890  typename device_type::memory_space>;
 891  int result = false;
 892  raw_deep_copy(&result, m_scalars.data() + flag, sizeof(int));
 893  Kokkos::fence(
 894  "Kokkos::UnorderedMap::get_flag: fence after copy to return value in "
 895  "HostSpace");
 896  return result;
 897  }
898 
899  static uint32_t calculate_capacity(uint32_t capacity_hint) {
900  // increase by 16% and round to nears multiple of 128
901  return capacity_hint
902  ? ((static_cast<uint32_t>(7ull * capacity_hint / 6u) + 127u) /
903  128u) *
904  128u
905  : 128u;
906  }
907 
 908  private: // private members
 // Limit the free-slot search during insert (see bounded_find_attempts).
 909  bool m_bounded_insert;
 910  hasher_type m_hasher;
 911  equal_to_type m_equal_to;
 // Cached entry count lives in host memory; refreshed lazily by size().
 912  using shared_size_t = View<size_type, Kokkos::DefaultHostExecutionSpace>;
 913  shared_size_t m_size;
 // One bit per slot: set = occupied.
 914  bitset_type m_available_indexes;
 // Head index of each bucket's chain; invalid_index terminates a chain.
 915  size_type_view m_hash_lists;
 916  size_type_view m_next_index;
 917  key_type_view m_keys;
 918  value_type_view m_values;
 // modified / erasable / failed_insert flags (see the *_idx enum).
 919  scalars_view m_scalars;
 920 
 // All instantiations are friends so converting ctor/assignment and
 // create_copy_view can reach each other's private members.
 921  template <typename KKey, typename VValue, typename DDevice, typename HHash,
 922  typename EEqualTo>
 923  friend class UnorderedMap;
 924 
 925  template <typename UMap>
 926  friend struct Impl::UnorderedMapErase;
 927 
 928  template <typename UMap>
 929  friend struct Impl::UnorderedMapHistogram;
 930 
 931  template <typename UMap>
 932  friend struct Impl::UnorderedMapPrint;
 933 };
934 
935 // Specialization of deep_copy for two UnorderedMap objects.
// Delegates to UnorderedMap::create_copy_view, which allocates fresh
// views in dst's memory space and copies src's contents into them.
936 template <typename DKey, typename DT, typename DDevice, typename SKey,
 937  typename ST, typename SDevice, typename Hasher, typename EqualTo>
938 inline void deep_copy(
 939  UnorderedMap<DKey, DT, DDevice, Hasher, EqualTo> &dst,
 940  const UnorderedMap<SKey, ST, SDevice, Hasher, EqualTo> &src) {
 941  dst.create_copy_view(src);
 942 }
943 
944 } // namespace Kokkos
945 
946 #ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_UNORDEREDMAP
947 #undef KOKKOS_IMPL_PUBLIC_INCLUDE
948 #undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_UNORDEREDMAP
949 #endif
950 #endif // KOKKOS_UNORDERED_MAP_HPP
KOKKOS_FORCEINLINE_FUNCTION bool success() const
Did the map successfully insert the key/value pair?
KOKKOS_FORCEINLINE_FUNCTION size_type capacity() const
The maximum number of entries that the table can hold.
KOKKOS_FORCEINLINE_FUNCTION uint32_t list_position() const
KOKKOS_INLINE_FUNCTION insert_result insert(key_type const &k, impl_value_type const &v=impl_value_type(), [[maybe_unused]] InsertOpType arg_insert_op=InsertOpType()) const
UnorderedMap(size_type capacity_hint=0, hasher_type hasher=hasher_type(), equal_to_type equal_to=equal_to_type())
Constructor.
size_type size() const
The number of entries in the table.
KOKKOS_FORCEINLINE_FUNCTION uint32_t index() const
Index where the key can be found as long as the insert did not fail.
Operations applied to the values array upon subsequent insertions.
void clear()
Clear all entries in the table.
View to an array of data.
KOKKOS_INLINE_FUNCTION size_type hash_capacity() const
The number of hash table "buckets".
First element of the return value of UnorderedMap::insert().
Memory management for host memory.
KOKKOS_FORCEINLINE_FUNCTION bool freed_existing() const
bool rehash(size_type requested_capacity=0)
Change the capacity of the map.
KOKKOS_INLINE_FUNCTION bool exists(const key_type &k) const
Does the key exist in the map.
bool failed_insert() const
The current number of failed insert() calls.
KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t< !std::is_void< Dummy >::value, std::conditional_t< has_const_value, impl_value_type, impl_value_type & > > value_at(size_type i) const
Get the value with i as its direct index.
KOKKOS_FORCEINLINE_FUNCTION key_type key_at(size_type i) const
Get the key with i as its direct index.
KOKKOS_INLINE_FUNCTION size_type find(const key_type &k) const
Find the given key k, if it exists in the table.
Thread-safe, performance-portable lookup table.
KOKKOS_FORCEINLINE_FUNCTION bool failed() const
Did the map fail to insert the key due to insufficient capacity.
KOKKOS_FORCEINLINE_FUNCTION bool existing() const
Was the key already present in the map.
UnorderedMap(const Impl::ViewCtorProp< P...> &arg_prop, size_type capacity_hint=0, hasher_type hasher=hasher_type(), equal_to_type equal_to=equal_to_type())