Tpetra parallel linear algebra  Version of the Day
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Pages
Tpetra_Details_allReduceView.hpp
Go to the documentation of this file.
1 // @HEADER
2 // ***********************************************************************
3 //
4 // Tpetra: Templated Linear Algebra Services Package
5 // Copyright (2008) Sandia Corporation
6 //
7 // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
8 // the U.S. Government retains certain rights in this software.
9 //
10 // Redistribution and use in source and binary forms, with or without
11 // modification, are permitted provided that the following conditions are
12 // met:
13 //
14 // 1. Redistributions of source code must retain the above copyright
15 // notice, this list of conditions and the following disclaimer.
16 //
17 // 2. Redistributions in binary form must reproduce the above copyright
18 // notice, this list of conditions and the following disclaimer in the
19 // documentation and/or other materials provided with the distribution.
20 //
21 // 3. Neither the name of the Corporation nor the names of the
22 // contributors may be used to endorse or promote products derived from
23 // this software without specific prior written permission.
24 //
25 // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
26 // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
29 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
30 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
31 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
32 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 //
37 // Questions? Contact Michael A. Heroux (maherou@sandia.gov)
38 //
39 // ************************************************************************
40 // @HEADER
41 
42 #ifndef TPETRA_DETAILS_ALLREDUCEVIEW_HPP
43 #define TPETRA_DETAILS_ALLREDUCEVIEW_HPP
44 
47 #include "Kokkos_Core.hpp"
48 #include "Teuchos_CommHelpers.hpp"
49 #include <limits>
50 #include <type_traits>
51 
54 
55 namespace { // (anonymous)
56 
57 // We can't assume C++14 yet, so we can't have templated constants.
// Compile-time test: does ViewType's memory live in CUDA unified
// (UVM) memory?  Some MPI implementations perform poorly with UVM
// buffers, so allReduceView stages such Views through temporary
// host buffers instead of handing them to MPI directly.
//
// (We can't assume C++14 yet, so we can't have templated constants.)
template<class ViewType>
struct view_is_cuda_uvm {
#ifdef KOKKOS_ENABLE_CUDA
  static constexpr bool value =
    std::is_same<typename ViewType::memory_space,
                 Kokkos::CudaUVMSpace>::value;
#else
  // Without CUDA enabled there is no UVM, so the answer is always "no."
  static constexpr bool value = false;
#endif // KOKKOS_ENABLE_CUDA
};
68 
// Traits class that computes a contiguous Kokkos::View type
// ("buffer") corresponding to a given ViewType, plus factory
// functions for allocating such a buffer.  allReduceView uses this
// when it cannot hand the original View's memory directly to MPI
// (noncontiguous layout, UVM memory, or an intercommunicator).
template<class ViewType>
struct MakeContiguousBuffer {
  // LayoutLeft and LayoutRight are the contiguous layouts; anything
  // else (e.g., LayoutStride) may have gaps between entries.
  static constexpr bool is_contiguous_layout =
    std::is_same<
      typename ViewType::array_layout,
      Kokkos::LayoutLeft>::value ||
    std::is_same<
      typename ViewType::array_layout,
      Kokkos::LayoutRight>::value;
  // Keep the View's own layout if it is already contiguous;
  // otherwise fall back to LayoutLeft for the buffer.
  using contiguous_array_layout =
    typename std::conditional<is_contiguous_layout,
                              typename ViewType::array_layout,
                              Kokkos::LayoutLeft>::type;
  // Put the buffer in host memory unless the View is already a
  // HostSpace View; host buffers are cheaper to allocate than device
  // buffers (see the discussion in allReduceView).
  using contiguous_device_type =
    typename std::conditional<
      std::is_same<
        typename ViewType::memory_space,
        Kokkos::HostSpace>::value,
      typename ViewType::device_type,
      Kokkos::HostSpace::device_type>::type;
  // The buffer type: same (non-const) entry type as ViewType, with a
  // guaranteed contiguous layout and the device type chosen above.
  using contiguous_buffer_type =
    Kokkos::View<typename ViewType::non_const_data_type,
                 contiguous_array_layout,
                 contiguous_device_type>;

  // Build a layout object with the same extents as the input View.
  // Passing all eight extents is safe for any rank: extent(k) for k
  // at or beyond the View's rank returns 1.
  static contiguous_array_layout
  makeLayout (const ViewType& view)
  {
    // NOTE (mfh 17 Mar 2019) This would be a good chance to use if
    // constexpr, once we have C++17.
    return contiguous_array_layout (view.extent (0), view.extent (1),
                                    view.extent (2), view.extent (3),
                                    view.extent (4), view.extent (5),
                                    view.extent (6), view.extent (7));
  }

  // Allocate an uninitialized contiguous buffer with the same label
  // and extents as the input View.  WithoutInitializing skips the
  // fill, since callers always overwrite the buffer's contents.
  static contiguous_buffer_type
  make (const ViewType& view)
  {
    using Kokkos::view_alloc;
    using Kokkos::WithoutInitializing;
    return contiguous_buffer_type
      (view_alloc (view.label (), WithoutInitializing),
       makeLayout (view));
  }
};
115 
116 template<class ViewType>
117 typename MakeContiguousBuffer<ViewType>::contiguous_buffer_type
118 makeContiguousBuffer (const ViewType& view)
119 {
120  return MakeContiguousBuffer<ViewType>::make (view);
121 }
122 
123 template<class ValueType>
124 static void
125 allReduceRawContiguous (ValueType output[],
126  const ValueType input[],
127  const size_t count,
128  const Teuchos::Comm<int>& comm)
129 {
130  using Teuchos::outArg;
131  using Teuchos::REDUCE_SUM;
132  using Teuchos::reduceAll;
133  constexpr size_t max_int = size_t (std::numeric_limits<int>::max ());
134  TEUCHOS_ASSERT( count <= size_t (max_int) );
135  reduceAll<int, ValueType> (comm, REDUCE_SUM, static_cast<int> (count),
136  input, output);
137 }
138 
139 } // namespace (anonymous)
140 
141 namespace Tpetra {
142 namespace Details {
143 
147 template<class InputViewType, class OutputViewType>
148 static void
149 allReduceView (const OutputViewType& output,
150  const InputViewType& input,
151  const Teuchos::Comm<int>& comm)
152 {
153  // If all the right conditions hold, we may all-reduce directly from
154  // the input to the output. Here are the relevant conditions:
155  //
156  // - assumeMpiCanAccessBuffers: May we safely assume that MPI may
157  // read from the input View and write to the output View? (Just
158  // because MPI _can_, doesn't mean that doing so will be faster.)
159  // - Do input and output Views alias each other, and is the
160  // communicator an intercommunicator? (Intercommunicators do not
161  // permit collectives to alias input and output buffers.)
162  // - Is either View noncontiguous?
163  //
164  // If either View is noncontiguous, we could use MPI_Type_Vector to
165  // create a noncontiguous MPI_Datatype, instead of packing and
166  // unpacking to resp. from a contiguous temporary buffer. Since
167  // MPI_Allreduce requires that the input and output buffers both
168  // have the same MPI_Datatype, this optimization might only work if
169  // the MPI communicator is an intercommunicator. Furthermore,
170  // creating an MPI_Datatype instance may require memory allocation
171  // anyway. Thus, it's probably better just to use a temporary
172  // contiguous buffer. We use a host buffer for that, since device
173  // buffers are slow to allocate.
174 
175  const bool viewsAlias = output.data () == input.data ();
176  if (comm.getSize () == 1) {
177  if (! viewsAlias) {
178  // InputViewType and OutputViewType can't be AnonymousSpace
179  // Views, because deep_copy needs to know their memory spaces.
180  Kokkos::deep_copy (output, input);
181  }
182  return;
183  }
184 
185  // We've had some experience that some MPI implementations do not
186  // perform well with UVM allocations.
187  const bool assumeMpiCanAccessBuffers =
188  ! view_is_cuda_uvm<OutputViewType>::value &&
189  ! view_is_cuda_uvm<InputViewType>::value &&
191 
192  const bool needContiguousTemporaryBuffers =
193  ! assumeMpiCanAccessBuffers ||
194  ::Tpetra::Details::isInterComm (comm) ||
195  output.span_is_contiguous () || input.span_is_contiguous ();
196  if (needContiguousTemporaryBuffers) {
197  auto output_tmp = makeContiguousBuffer (output);
198  auto input_tmp = makeContiguousBuffer (input);
199  Kokkos::deep_copy (input_tmp, input);
200  // It's OK if LayoutLeft allocations have padding at the end of
201  // each row. MPI might write to those padding bytes, but it's
202  // undefined behavior for users to use Kokkos to access whatever
203  // bytes are there, and the bytes there don't need to define valid
204  // ValueType instances.
205  allReduceRawContiguous (output_tmp.data (), input_tmp.data (),
206  output_tmp.span (), comm);
207  Kokkos::deep_copy (output, output_tmp);
208  }
209  else {
210  allReduceRawContiguous (output.data (), input.data (),
211  output.span (), comm);
212  }
213 }
214 
215 } // namespace Details
216 } // namespace Tpetra
217 
218 #endif // TPETRA_DETAILS_ALLREDUCEVIEW_HPP
void deep_copy(MultiVector< DS, DL, DG, DN > &dst, const MultiVector< SS, SL, SG, SN > &src)
Copy the contents of the MultiVector src into dst.
static void allReduceView(const OutputViewType &output, const InputViewType &input, const Teuchos::Comm< int > &comm)
All-reduce from input Kokkos::View to output Kokkos::View.
static bool assumeMpiIsCudaAware()
Whether to assume that MPI is CUDA aware.
Declaration of Tpetra::Details::isInterComm.
Declaration of Tpetra::Details::Behavior, a class that describes Tpetra's behavior.