Kokkos_UnorderedMap.hpp
//@HEADER
// ************************************************************************
//
// Kokkos v. 4.0
// Copyright (2022) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
// Part of Kokkos, under the Apache License v2.0 with LLVM Exceptions.
// See https://kokkos.org/LICENSE for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//@HEADER

#ifndef KOKKOS_UNORDERED_MAP_HPP
#define KOKKOS_UNORDERED_MAP_HPP
#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
#define KOKKOS_IMPL_PUBLIC_INCLUDE
#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_UNORDEREDMAP
#endif

#include <Kokkos_Core.hpp>
#include <Kokkos_Functional.hpp>

#include <Kokkos_Bitset.hpp>

#include <impl/Kokkos_Traits.hpp>
#include <impl/Kokkos_UnorderedMap_impl.hpp>
#include <View/Kokkos_ViewCtor.hpp>

#include <cstdint>

namespace Kokkos {

enum : unsigned { UnorderedMapInvalidIndex = ~0u };

class UnorderedMapInsertResult {
 private:
  enum Status : uint32_t {
    SUCCESS = 1u << 31,
    EXISTING = 1u << 30,
    FREED_EXISTING = 1u << 29,
    LIST_LENGTH_MASK = ~(SUCCESS | EXISTING | FREED_EXISTING)
  };

 public:
  // Did the insert create a new entry?
  KOKKOS_FORCEINLINE_FUNCTION
  bool success() const { return (m_status & SUCCESS); }

  // Was the key already present?
  KOKKOS_FORCEINLINE_FUNCTION
  bool existing() const { return (m_status & EXISTING); }

  // Did the insert fail (e.g. the map ran out of space)?
  KOKKOS_FORCEINLINE_FUNCTION
  bool failed() const { return m_index == UnorderedMapInvalidIndex; }

  // Did the insert release a speculatively claimed entry after finding the
  // key already present?
  KOKKOS_FORCEINLINE_FUNCTION
  bool freed_existing() const { return (m_status & FREED_EXISTING); }

  // Number of hash-list nodes traversed during the insert.
  KOKKOS_FORCEINLINE_FUNCTION
  uint32_t list_position() const { return (m_status & LIST_LENGTH_MASK); }

  // Index of the entry (meaningful only when !failed()).
  KOKKOS_FORCEINLINE_FUNCTION
  uint32_t index() const { return m_index; }

  KOKKOS_FORCEINLINE_FUNCTION
  UnorderedMapInsertResult() : m_index(UnorderedMapInvalidIndex), m_status(0) {}

  KOKKOS_FORCEINLINE_FUNCTION
  void increment_list_position() {
    m_status += (list_position() < LIST_LENGTH_MASK) ? 1u : 0u;
  }

  KOKKOS_FORCEINLINE_FUNCTION
  void set_existing(uint32_t i, bool arg_freed_existing) {
    m_index = i;
    m_status =
        EXISTING | (arg_freed_existing ? FREED_EXISTING : 0u) | list_position();
  }

  KOKKOS_FORCEINLINE_FUNCTION
  void set_success(uint32_t i) {
    m_index = i;
    m_status = SUCCESS | list_position();
  }

 private:
  uint32_t m_index;
  uint32_t m_status;
};
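
// Example (illustrative, not part of the original header): interpreting the
// result of a device-side insert. Exactly one of success(), existing(), or
// failed() describes the outcome; index() is meaningful only when the insert
// did not fail. `map`, `keys`, and `n` are hypothetical.
//
//   Kokkos::parallel_for(n, KOKKOS_LAMBDA(int i) {
//     auto r = map.insert(keys(i), 1.0);
//     if (r.success())       { /* new entry created at r.index() */ }
//     else if (r.existing()) { /* key already present at r.index() */ }
//     else                   { /* r.failed(): out of space, see rehash() */ }
//   });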

template <class ValueTypeView, class ValuesIdxType>
struct UnorderedMapInsertOpTypes {
  using value_type = typename ValueTypeView::non_const_value_type;
  // NoOp (the default): an insert that finds the key already present leaves
  // the stored value unchanged.
  struct NoOp {
    KOKKOS_FUNCTION
    void op(ValueTypeView, ValuesIdxType, const value_type) const {}
  };
  // AtomicAdd: an insert that finds the key already present atomically adds
  // the new value to the stored value.
  struct AtomicAdd {
    KOKKOS_FUNCTION
    void op(ValueTypeView values, ValuesIdxType values_idx,
            const value_type v) const {
      Kokkos::atomic_add(values.data() + values_idx, v);
    }
  };
};
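
// Example (hedged sketch): choosing an insert operation. NoOp (the default)
// leaves an existing key's value unchanged; AtomicAdd accumulates into it.
// The view type below is an assumption matching the internal value view of
// an insertable map; `map` and `key` are hypothetical.
//
//   using map_t = Kokkos::UnorderedMap<int, double>;
//   using value_view_t = Kokkos::View<double *, map_t::device_type>;
//   using add_t = typename Kokkos::UnorderedMapInsertOpTypes<
//       value_view_t, uint32_t>::AtomicAdd;
//   map.insert(key, 2.5, add_t());  // adds 2.5 if `key` is already present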

template <typename Key, typename Value,
          typename Device = Kokkos::DefaultExecutionSpace,
          typename Hasher = pod_hash<std::remove_const_t<Key>>,
          typename EqualTo = pod_equal_to<std::remove_const_t<Key>>>
class UnorderedMap {
 private:
  using host_mirror_space =
      typename ViewTraits<Key, Device, void, void>::host_mirror_space;

 public:

  // key_types
  using declared_key_type = Key;
  using key_type = std::remove_const_t<declared_key_type>;
  using const_key_type = std::add_const_t<key_type>;

  // value_types
  using declared_value_type = Value;
  using value_type = std::remove_const_t<declared_value_type>;
  using const_value_type = std::add_const_t<value_type>;

  using device_type = Device;
  using execution_space = typename Device::execution_space;
  using hasher_type = Hasher;
  using equal_to_type = EqualTo;
  using size_type = uint32_t;

  // map_types
  using declared_map_type =
      UnorderedMap<declared_key_type, declared_value_type, device_type,
                   hasher_type, equal_to_type>;
  using insertable_map_type = UnorderedMap<key_type, value_type, device_type,
                                           hasher_type, equal_to_type>;
  using modifiable_map_type =
      UnorderedMap<const_key_type, value_type, device_type, hasher_type,
                   equal_to_type>;
  using const_map_type = UnorderedMap<const_key_type, const_value_type,
                                      device_type, hasher_type, equal_to_type>;

  static constexpr bool is_set = std::is_void_v<value_type>;
  static constexpr bool has_const_key =
      std::is_same_v<const_key_type, declared_key_type>;
  static constexpr bool has_const_value =
      is_set || std::is_same_v<const_value_type, declared_value_type>;

  static constexpr bool is_insertable_map =
      !has_const_key && (is_set || !has_const_value);
  static constexpr bool is_modifiable_map = has_const_key && !has_const_value;
  static constexpr bool is_const_map = has_const_key && has_const_value;

  using insert_result = UnorderedMapInsertResult;

  using HostMirror =
      UnorderedMap<Key, Value, host_mirror_space, Hasher, EqualTo>;

  using histogram_type = Impl::UnorderedMapHistogram<const_map_type>;

 private:
  enum : size_type { invalid_index = ~static_cast<size_type>(0) };

  using impl_value_type = std::conditional_t<is_set, int, declared_value_type>;

  using key_type_view = std::conditional_t<
      is_insertable_map, View<key_type *, device_type>,
      View<const key_type *, device_type, MemoryTraits<RandomAccess>>>;

  using value_type_view = std::conditional_t<
      is_insertable_map || is_modifiable_map,
      View<impl_value_type *, device_type>,
      View<const impl_value_type *, device_type, MemoryTraits<RandomAccess>>>;

  using size_type_view = std::conditional_t<
      is_insertable_map, View<size_type *, device_type>,
      View<const size_type *, device_type, MemoryTraits<RandomAccess>>>;

  using bitset_type = std::conditional_t<is_insertable_map, Bitset<Device>,
                                         ConstBitset<Device>>;

  enum { modified_idx = 0, erasable_idx = 1, failed_insert_idx = 2 };
  enum { num_scalars = 3 };
  using scalars_view = View<int[num_scalars], LayoutLeft, device_type>;

 public:

  using default_op_type =
      typename UnorderedMapInsertOpTypes<value_type_view, uint32_t>::NoOp;

  UnorderedMap(size_type capacity_hint = 0, hasher_type hasher = hasher_type(),
               equal_to_type equal_to = equal_to_type())
      : UnorderedMap(Kokkos::view_alloc(), capacity_hint, hasher, equal_to) {}
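
  // Example (illustrative, not part of the original header): constructing a
  // map. The capacity hint is rounded up by calculate_capacity(), so
  // capacity() may exceed the hint. The label/execution-space properties are
  // optional; names here are hypothetical.
  //
  //   Kokkos::UnorderedMap<int, double> a(512);
  //   Kokkos::UnorderedMap<int, double> b(
  //       Kokkos::view_alloc(Kokkos::DefaultExecutionSpace{}, "my map"), 512);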

  template <class... P>
  UnorderedMap(const Impl::ViewCtorProp<P...> &arg_prop,
               size_type capacity_hint = 0, hasher_type hasher = hasher_type(),
               equal_to_type equal_to = equal_to_type())
      : m_bounded_insert(true), m_hasher(hasher), m_equal_to(equal_to) {
    if (!is_insertable_map) {
      Kokkos::Impl::throw_runtime_exception(
          "Cannot construct a non-insertable (i.e. const key_type) "
          "unordered_map");
    }

    // Validate the view-construction properties.
    using alloc_prop_t = std::decay_t<decltype(arg_prop)>;
    static_assert(alloc_prop_t::initialize,
                  "Allocation property 'initialize' should be true.");
    static_assert(
        !alloc_prop_t::has_pointer,
        "Allocation properties should not contain the 'pointer' property.");

    // Add a default label if the user did not provide one, and derive a
    // non-initializing property set for the views that are filled below.
    const auto prop_copy =
        Impl::with_properties_if_unset(arg_prop, std::string("UnorderedMap"));
    const auto prop_copy_noinit =
        Impl::with_properties_if_unset(prop_copy, Kokkos::WithoutInitializing);

    m_size = shared_size_t(Kokkos::view_alloc(
        Kokkos::DefaultHostExecutionSpace{},
        Impl::get_property<Impl::LabelTag>(prop_copy) + " - size"));

    m_available_indexes =
        bitset_type(Kokkos::Impl::append_to_label(prop_copy, " - bitset"),
                    calculate_capacity(capacity_hint));

    m_hash_lists = size_type_view(
        Kokkos::Impl::append_to_label(prop_copy_noinit, " - hash list"),
        Impl::find_hash_size(capacity()));

    m_next_index = size_type_view(
        Kokkos::Impl::append_to_label(prop_copy_noinit, " - next index"),
        capacity() + 1);  // +1 so that the *_at functions can always return a
                          // valid reference

    m_keys = key_type_view(Kokkos::Impl::append_to_label(prop_copy, " - keys"),
                           capacity());

    m_values =
        value_type_view(Kokkos::Impl::append_to_label(prop_copy, " - values"),
                        is_set ? 0 : capacity());

    m_scalars =
        scalars_view(Kokkos::Impl::append_to_label(prop_copy, " - scalars"));

    // Mark the hash lists and linked-list nodes as empty, using the caller's
    // execution space if one was provided.
    if constexpr (alloc_prop_t::has_execution_space) {
      const auto &space = Impl::get_property<Impl::ExecutionSpaceTag>(arg_prop);
      Kokkos::deep_copy(space, m_hash_lists, invalid_index);
      Kokkos::deep_copy(space, m_next_index, invalid_index);
    } else {
      Kokkos::deep_copy(m_hash_lists, invalid_index);
      Kokkos::deep_copy(m_next_index, invalid_index);
    }
  }

  void reset_failed_insert_flag() { reset_flag(failed_insert_idx); }

  histogram_type get_histogram() { return histogram_type(*this); }

  // Clear all entries in the map; not a device function.
  void clear() {
    m_bounded_insert = true;

    if (capacity() == 0) return;

    m_available_indexes.clear();

    Kokkos::deep_copy(m_hash_lists, invalid_index);
    Kokkos::deep_copy(m_next_index, invalid_index);
    {
      const key_type tmp = key_type();
      Kokkos::deep_copy(m_keys, tmp);
    }
    Kokkos::deep_copy(m_scalars, 0);
    m_size() = 0;
  }

  KOKKOS_INLINE_FUNCTION constexpr bool is_allocated() const {
    return (m_keys.is_allocated() && (is_set || m_values.is_allocated()) &&
            m_scalars.is_allocated());
  }

  // Rebuild the map with the requested capacity (clamped below by the
  // current size). Returns false for non-insertable maps; not a device
  // function.
  bool rehash(size_type requested_capacity = 0) {
    const bool bounded_insert = (capacity() == 0) || (size() == 0u);
    return rehash(requested_capacity, bounded_insert);
  }

  bool rehash(size_type requested_capacity, bool bounded_insert) {
    if (!is_insertable_map) return false;

    const size_type curr_size = size();
    requested_capacity =
        (requested_capacity < curr_size) ? curr_size : requested_capacity;

    insertable_map_type tmp(requested_capacity, m_hasher, m_equal_to);

    if (curr_size) {
      tmp.m_bounded_insert = false;
      Impl::UnorderedMapRehash<insertable_map_type> f(tmp, *this);
      f.apply();
    }
    tmp.m_bounded_insert = bounded_insert;

    *this = tmp;

    return true;
  }
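
  // Example (hedged sketch): the insert-retry pattern. A failed insert only
  // sets a flag; the host checks the flag, grows the map, and re-runs the
  // inserts. Assumes `map`, `keys`, and `values` are pre-existing device data.
  //
  //   Kokkos::parallel_for(n, KOKKOS_LAMBDA(int i) {
  //     map.insert(keys(i), values(i));
  //   });
  //   while (map.failed_insert()) {
  //     map.rehash(2 * map.capacity());  // rebuild larger; resets the flag
  //     Kokkos::parallel_for(n, KOKKOS_LAMBDA(int i) {
  //       map.insert(keys(i), values(i));  // re-inserting existing keys is ok
  //     });
  //   }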

  // The number of entries in the map, recomputed lazily on the host.
  // Not a device function.
  size_type size() const {
    if (capacity() == 0u) return 0u;
    if (modified()) {
      m_size() = m_available_indexes.count();
      reset_flag(modified_idx);
    }
    return m_size();
  }

  // True if any insert since the last rehash/clear failed for lack of space.
  bool failed_insert() const { return get_flag(failed_insert_idx); }

  bool erasable() const {
    return is_insertable_map ? get_flag(erasable_idx) : false;
  }

  bool begin_erase() {
    bool result = !erasable();
    if (is_insertable_map && result) {
      execution_space().fence(
          "Kokkos::UnorderedMap::begin_erase: fence before setting erasable "
          "flag");
      set_flag(erasable_idx);
    }
    return result;
  }

  bool end_erase() {
    bool result = erasable();
    if (is_insertable_map && result) {
      execution_space().fence(
          "Kokkos::UnorderedMap::end_erase: fence before erasing");
      Impl::UnorderedMapErase<declared_map_type> f(*this);
      f.apply();
      execution_space().fence(
          "Kokkos::UnorderedMap::end_erase: fence after erasing");
      reset_flag(erasable_idx);
    }
    return result;
  }
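
  // Example (illustrative): the erase protocol. erase() only takes effect
  // between begin_erase() and end_erase(), and end_erase() performs the
  // actual removal; inserts are rejected while the map is erasable.
  //
  //   map.begin_erase();
  //   Kokkos::parallel_for(n, KOKKOS_LAMBDA(int i) { map.erase(keys(i)); });
  //   map.end_erase();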

  // The maximum number of entries the map can hold.
  KOKKOS_FORCEINLINE_FUNCTION
  size_type capacity() const { return m_available_indexes.size(); }

  // The number of hash lists (buckets).
  KOKKOS_INLINE_FUNCTION
  size_type hash_capacity() const { return m_hash_lists.extent(0); }

  //---------------------------------------------------------------------------
  //---------------------------------------------------------------------------

  // Attempt to insert the given key/value pair. Safe to call concurrently
  // with other inserts and finds; returns a failed result if the map is not
  // insertable, has zero capacity, or is currently in an erase phase.
  template <typename InsertOpType = default_op_type>
  KOKKOS_INLINE_FUNCTION insert_result
  insert(key_type const &k, impl_value_type const &v = impl_value_type(),
         [[maybe_unused]] InsertOpType arg_insert_op = InsertOpType()) const {
    if constexpr (is_set) {
      static_assert(std::is_same_v<InsertOpType, default_op_type>,
                    "Insert Operations are not supported on sets.");
    }

    insert_result result;

    if (!is_insertable_map || capacity() == 0u ||
        m_scalars((int)erasable_idx)) {
      return result;
    }

    if (!m_scalars((int)modified_idx)) {
      m_scalars((int)modified_idx) = true;
    }

    int volatile &failed_insert_ref = m_scalars((int)failed_insert_idx);

    const size_type hash_value = m_hasher(k);
    const size_type hash_list = hash_value % m_hash_lists.extent(0);

    size_type *curr_ptr = &m_hash_lists[hash_list];
    size_type new_index = invalid_index;

    // Scale the hash-list index into the capacity range to seed the
    // free-slot search; the multiply is done in double to avoid overflowing
    // 32-bit integer arithmetic.
    size_type index_hint = static_cast<size_type>(
        (static_cast<double>(hash_list) * capacity()) / m_hash_lists.extent(0));

    size_type find_attempts = 0;

    enum : unsigned { bounded_find_attempts = 32u };
    const size_type max_attempts =
        (m_bounded_insert &&
         (bounded_find_attempts < m_available_indexes.max_hint()))
            ? bounded_find_attempts
            : m_available_indexes.max_hint();

    bool not_done = true;

#if defined(__MIC__)
#pragma noprefetch
#endif
    while (not_done) {
      // Keep searching the unordered list for this key; the list is only
      // appended to during the insert phase, and other threads may be
      // appending concurrently, hence the volatile/atomic loads.

      // FIXME_SYCL replacement for memory_fence
#ifdef KOKKOS_ENABLE_SYCL
      size_type curr = Kokkos::atomic_load(curr_ptr);
#else
      size_type curr = volatile_load(curr_ptr);
#endif

      KOKKOS_NONTEMPORAL_PREFETCH_LOAD(
          &m_keys[curr != invalid_index ? curr : 0]);
#if defined(__MIC__)
#pragma noprefetch
#endif
      while (curr != invalid_index && !m_equal_to(
#ifdef KOKKOS_ENABLE_SYCL
                                          Kokkos::atomic_load(&m_keys[curr])
#else
                                          volatile_load(&m_keys[curr])
#endif
                                              ,
                                          k)) {
        result.increment_list_position();
        index_hint = curr;
        curr_ptr = &m_next_index[curr];
#ifdef KOKKOS_ENABLE_SYCL
        curr = Kokkos::atomic_load(curr_ptr);
#else
        curr = volatile_load(curr_ptr);
#endif
        KOKKOS_NONTEMPORAL_PREFETCH_LOAD(
            &m_keys[curr != invalid_index ? curr : 0]);
      }

      //------------------------------------------------------------
      // If the key is already present then return that index.
      if (curr != invalid_index) {
        const bool free_existing = new_index != invalid_index;
        if (free_existing) {
          // This thread previously claimed an unused entry that was never
          // inserted; release that unused entry immediately.
          if (!m_available_indexes.reset(new_index)) {
            Kokkos::printf("Unable to free existing\n");
          }
        }

        result.set_existing(curr, free_existing);
        if constexpr (!is_set) {
          arg_insert_op.op(m_values, curr, v);
        }
        not_done = false;
      }
      //------------------------------------------------------------
      // Key is not currently in the map.
      // If the thread has claimed an entry try to insert now.
      else {
        //------------------------------------------------------------
        // If we have not already claimed an unused entry then do so now.
        if (new_index == invalid_index) {
          bool found = false;
          // use the hash_list as the flag for the search direction
          Kokkos::tie(found, index_hint) =
              m_available_indexes.find_any_unset_near(index_hint, hash_list);
          // found an index and this thread set it
          if (!found && ++find_attempts >= max_attempts) {
            failed_insert_ref = true;
            not_done = false;
          } else if (m_available_indexes.set(index_hint)) {
            new_index = index_hint;
            // Set key and value
            KOKKOS_NONTEMPORAL_PREFETCH_STORE(&m_keys[new_index]);
// FIXME_SYCL replacement for memory_fence
#ifdef KOKKOS_ENABLE_SYCL
            Kokkos::atomic_store(&m_keys[new_index], k);
#else
            m_keys[new_index] = k;
#endif

            if (!is_set) {
              KOKKOS_NONTEMPORAL_PREFETCH_STORE(&m_values[new_index]);
#ifdef KOKKOS_ENABLE_SYCL
              Kokkos::atomic_store(&m_values[new_index], v);
#else
              m_values[new_index] = v;
#endif
            }

#ifndef KOKKOS_ENABLE_SYCL
            // Do not proceed until key and value are updated in global memory
            memory_fence();
#endif
          }
        } else if (failed_insert_ref) {
          not_done = false;
        }

        // Attempt to append the claimed entry to the list. Another thread
        // may be appending to the same list, so protect with an atomic.
        if (new_index != invalid_index &&
            curr == atomic_compare_exchange(
                        curr_ptr, static_cast<size_type>(invalid_index),
                        new_index)) {
          // Succeeded in appending
          result.set_success(new_index);
          not_done = false;
        }
      }
    }  // while ( not_done )

    return result;
  }
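
  // Example (hedged sketch): counting key occurrences with the AtomicAdd
  // insert operation. The first insert of a key stores v; inserts that find
  // the key already present apply the op instead. Assumes `keys` is a device
  // view of int with extent n.
  //
  //   using count_map_t = Kokkos::UnorderedMap<int, int>;
  //   using count_view_t = Kokkos::View<int *, count_map_t::device_type>;
  //   using add_t = typename Kokkos::UnorderedMapInsertOpTypes<
  //       count_view_t, uint32_t>::AtomicAdd;
  //   count_map_t counts(n);
  //   Kokkos::parallel_for(n, KOKKOS_LAMBDA(int i) {
  //     counts.insert(keys(i), 1, add_t());
  //   });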

  // Mark the entry with the given key for deletion. Only permitted between
  // begin_erase() and end_erase(); returns false otherwise.
  KOKKOS_INLINE_FUNCTION
  bool erase(key_type const &k) const {
    bool result = false;

    if (is_insertable_map && 0u < capacity() && m_scalars((int)erasable_idx)) {
      if (!m_scalars((int)modified_idx)) {
        m_scalars((int)modified_idx) = true;
      }

      size_type index = find(k);
      if (valid_at(index)) {
        m_available_indexes.reset(index);
        result = true;
      }
    }

    return result;
  }

  // Return the index of the entry with the given key, or invalid_index if
  // the key is not present.
  KOKKOS_INLINE_FUNCTION
  size_type find(const key_type &k) const {
    size_type curr = 0u < capacity()
                         ? m_hash_lists(m_hasher(k) % m_hash_lists.extent(0))
                         : invalid_index;

    KOKKOS_NONTEMPORAL_PREFETCH_LOAD(&m_keys[curr != invalid_index ? curr : 0]);
    while (curr != invalid_index && !m_equal_to(m_keys[curr], k)) {
      KOKKOS_NONTEMPORAL_PREFETCH_LOAD(
          &m_keys[curr != invalid_index ? curr : 0]);
      curr = m_next_index[curr];
    }

    return curr;
  }

  // Does the map contain an entry with the given key?
  KOKKOS_INLINE_FUNCTION
  bool exists(const key_type &k) const { return valid_at(find(k)); }
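
  // Example (illustrative): read-only lookups from device code. The index
  // returned by find() stays valid until the next rehash or erase phase;
  // `map`, `keys`, and `out` are hypothetical device data.
  //
  //   Kokkos::parallel_for(n, KOKKOS_LAMBDA(int i) {
  //     const uint32_t idx = map.find(keys(i));
  //     if (map.valid_at(idx)) out(i) = map.value_at(idx);
  //   });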

  // The value at the given index; enabled only for maps (not sets). Returns
  // a mutable reference unless the value type is const.
  template <typename Dummy = value_type>
  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
      !std::is_void_v<Dummy>,  // !is_set
      std::conditional_t<has_const_value, impl_value_type, impl_value_type &>>
  value_at(size_type i) const {
    KOKKOS_EXPECTS(i < capacity());
    return m_values[i];
  }

  // The key at the given index.
  KOKKOS_FORCEINLINE_FUNCTION
  key_type key_at(size_type i) const {
    KOKKOS_EXPECTS(i < capacity());
    return m_keys[i];
  }

  // Is the entry at the given index occupied?
  KOKKOS_FORCEINLINE_FUNCTION
  bool valid_at(size_type i) const { return m_available_indexes.test(i); }

  template <typename SKey, typename SValue>
  UnorderedMap(
      UnorderedMap<SKey, SValue, Device, Hasher, EqualTo> const &src,
      std::enable_if_t<
          Impl::UnorderedMapCanAssign<declared_key_type, declared_value_type,
                                      SKey, SValue>::value,
          int> = 0)
      : m_bounded_insert(src.m_bounded_insert),
        m_hasher(src.m_hasher),
        m_equal_to(src.m_equal_to),
        m_size(src.m_size),
        m_available_indexes(src.m_available_indexes),
        m_hash_lists(src.m_hash_lists),
        m_next_index(src.m_next_index),
        m_keys(src.m_keys),
        m_values(src.m_values),
        m_scalars(src.m_scalars) {}

  template <typename SKey, typename SValue>
  std::enable_if_t<
      Impl::UnorderedMapCanAssign<declared_key_type, declared_value_type, SKey,
                                  SValue>::value,
      declared_map_type &>
  operator=(UnorderedMap<SKey, SValue, Device, Hasher, EqualTo> const &src) {
    m_bounded_insert = src.m_bounded_insert;
    m_hasher = src.m_hasher;
    m_equal_to = src.m_equal_to;
    m_size = src.m_size;
    m_available_indexes = src.m_available_indexes;
    m_hash_lists = src.m_hash_lists;
    m_next_index = src.m_next_index;
    m_keys = src.m_keys;
    m_values = src.m_values;
    m_scalars = src.m_scalars;
    return *this;
  }

  // Re-allocate the views of the calling UnorderedMap according to src
  // capacity, and deep copy the src data.
  template <typename SKey, typename SValue, typename SDevice>
  std::enable_if_t<std::is_same_v<std::remove_const_t<SKey>, key_type> &&
                   std::is_same_v<std::remove_const_t<SValue>, value_type>>
  create_copy_view(
      UnorderedMap<SKey, SValue, SDevice, Hasher, EqualTo> const &src) {
    if (m_hash_lists.data() != src.m_hash_lists.data()) {
      allocate_view(src);
      deep_copy_view(src);
    }
  }

  // Allocate views of the calling UnorderedMap with the same capacity as the
  // src.
  template <typename SKey, typename SValue, typename SDevice>
  std::enable_if_t<std::is_same_v<std::remove_const_t<SKey>, key_type> &&
                   std::is_same_v<std::remove_const_t<SValue>, value_type>>
  allocate_view(
      UnorderedMap<SKey, SValue, SDevice, Hasher, EqualTo> const &src) {
    insertable_map_type tmp;

    tmp.m_bounded_insert = src.m_bounded_insert;
    tmp.m_hasher = src.m_hasher;
    tmp.m_equal_to = src.m_equal_to;
    tmp.m_size() = src.m_size();
    tmp.m_available_indexes = bitset_type(src.capacity());
    tmp.m_hash_lists = size_type_view(
        view_alloc(WithoutInitializing, "UnorderedMap hash list"),
        src.m_hash_lists.extent(0));
    tmp.m_next_index = size_type_view(
        view_alloc(WithoutInitializing, "UnorderedMap next index"),
        src.m_next_index.extent(0));
    tmp.m_keys =
        key_type_view(view_alloc(WithoutInitializing, "UnorderedMap keys"),
                      src.m_keys.extent(0));
    tmp.m_values =
        value_type_view(view_alloc(WithoutInitializing, "UnorderedMap values"),
                        src.m_values.extent(0));
    tmp.m_scalars = scalars_view("UnorderedMap scalars");

    *this = tmp;
  }

  // Deep copy view data from src. This requires that the src capacity is
  // identical to the capacity of the calling UnorderedMap.
  template <typename SKey, typename SValue, typename SDevice>
  std::enable_if_t<std::is_same_v<std::remove_const_t<SKey>, key_type> &&
                   std::is_same_v<std::remove_const_t<SValue>, value_type>>
  deep_copy_view(
      UnorderedMap<SKey, SValue, SDevice, Hasher, EqualTo> const &src) {
#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_4
    // To deep copy UnorderedMap, capacity must be identical
    KOKKOS_EXPECTS(capacity() == src.capacity());
#else
    if (capacity() != src.capacity()) {
      allocate_view(src);
#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
      Kokkos::Impl::log_warning(
          "Warning: deep_copy_view() allocating views is deprecated. Must call "
          "with UnorderedMaps of identical capacity, or use "
          "create_copy_view().\n");
#endif
    }
#endif

    if (m_hash_lists.data() != src.m_hash_lists.data()) {
      Kokkos::deep_copy(m_available_indexes, src.m_available_indexes);

      using raw_deep_copy =
          Kokkos::Impl::DeepCopy<typename device_type::memory_space,
                                 typename SDevice::memory_space>;

      raw_deep_copy(m_hash_lists.data(), src.m_hash_lists.data(),
                    sizeof(size_type) * src.m_hash_lists.extent(0));
      raw_deep_copy(m_next_index.data(), src.m_next_index.data(),
                    sizeof(size_type) * src.m_next_index.extent(0));
      raw_deep_copy(m_keys.data(), src.m_keys.data(),
                    sizeof(key_type) * src.m_keys.extent(0));
      if (!is_set) {
        raw_deep_copy(m_values.data(), src.m_values.data(),
                      sizeof(impl_value_type) * src.m_values.extent(0));
      }
      raw_deep_copy(m_scalars.data(), src.m_scalars.data(),
                    sizeof(int) * num_scalars);

      Kokkos::fence(
          "Kokkos::UnorderedMap::deep_copy_view: fence after copy to dst.");
    }
  }

 private:  // private member functions
  bool modified() const { return get_flag(modified_idx); }

  void set_flag(int flag) const {
    using raw_deep_copy =
        Kokkos::Impl::DeepCopy<typename device_type::memory_space,
                               Kokkos::HostSpace>;
    const int true_ = true;
    raw_deep_copy(m_scalars.data() + flag, &true_, sizeof(int));
    Kokkos::fence(
        "Kokkos::UnorderedMap::set_flag: fence after copying flag from "
        "HostSpace");
  }

  void reset_flag(int flag) const {
    using raw_deep_copy =
        Kokkos::Impl::DeepCopy<typename device_type::memory_space,
                               Kokkos::HostSpace>;
    const int false_ = false;
    raw_deep_copy(m_scalars.data() + flag, &false_, sizeof(int));
    Kokkos::fence(
        "Kokkos::UnorderedMap::reset_flag: fence after copying flag from "
        "HostSpace");
  }

  bool get_flag(int flag) const {
    using raw_deep_copy =
        Kokkos::Impl::DeepCopy<Kokkos::HostSpace,
                               typename device_type::memory_space>;
    int result = false;
    raw_deep_copy(&result, m_scalars.data() + flag, sizeof(int));
    Kokkos::fence(
        "Kokkos::UnorderedMap::get_flag: fence after copy to return value in "
        "HostSpace");
    return result;
  }

  static uint32_t calculate_capacity(uint32_t capacity_hint) {
    // Increase the hint by a factor of 7/6 (~17%) and round up to the nearest
    // multiple of 128, e.g. capacity_hint = 1000 -> 7000/6 = 1166 -> 1280.
    return capacity_hint
               ? ((static_cast<uint32_t>(7ull * capacity_hint / 6u) + 127u) /
                  128u) *
                     128u
               : 128u;
  }

 private:  // private members
  bool m_bounded_insert;
  hasher_type m_hasher;
  equal_to_type m_equal_to;
  using shared_size_t = View<size_type, Kokkos::DefaultHostExecutionSpace>;
  shared_size_t m_size;
  bitset_type m_available_indexes;
  size_type_view m_hash_lists;
  size_type_view m_next_index;
  key_type_view m_keys;
  value_type_view m_values;
  scalars_view m_scalars;

  template <typename KKey, typename VValue, typename DDevice, typename HHash,
            typename EEqualTo>
  friend class UnorderedMap;

  template <typename UMap>
  friend struct Impl::UnorderedMapErase;

  template <typename UMap>
  friend struct Impl::UnorderedMapHistogram;

  template <typename UMap>
  friend struct Impl::UnorderedMapPrint;
};

// Specialization of deep_copy() for two UnorderedMap objects.
template <typename DKey, typename DT, typename DDevice, typename SKey,
          typename ST, typename SDevice, typename Hasher, typename EqualTo>
inline void deep_copy(
    UnorderedMap<DKey, DT, DDevice, Hasher, EqualTo> &dst,
    const UnorderedMap<SKey, ST, SDevice, Hasher, EqualTo> &src) {
  dst.deep_copy_view(src);
}

// Specialization of create_mirror() for an UnorderedMap object.
template <typename Key, typename ValueType, typename Device, typename Hasher,
          typename EqualTo>
typename UnorderedMap<Key, ValueType, Device, Hasher, EqualTo>::HostMirror
create_mirror(
    const UnorderedMap<Key, ValueType, Device, Hasher, EqualTo> &src) {
  typename UnorderedMap<Key, ValueType, Device, Hasher, EqualTo>::HostMirror
      dst;
  dst.allocate_view(src);
  return dst;
}
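
// Example (illustrative): inspecting a device-resident map on the host via
// the usual mirror idiom. create_mirror() only allocates; deep_copy() copies
// the contents. `d_map` is a hypothetical UnorderedMap<int, double>.
//
//   auto h_map = Kokkos::create_mirror(d_map);
//   Kokkos::deep_copy(h_map, d_map);
//   for (uint32_t i = 0; i < h_map.capacity(); ++i)
//     if (h_map.valid_at(i))
//       printf("%d -> %g\n", h_map.key_at(i), h_map.value_at(i));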

}  // namespace Kokkos

#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_UNORDEREDMAP
#undef KOKKOS_IMPL_PUBLIC_INCLUDE
#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_UNORDEREDMAP
#endif
#endif  // KOKKOS_UNORDERED_MAP_HPP