Tpetra_Details_DistributorActor.cpp
// @HEADER
// *****************************************************************************
// Tpetra: Templated Linear Algebra Services Package
//
// Copyright 2008 NTESS and the Tpetra contributors.
// SPDX-License-Identifier: BSD-3-Clause
// *****************************************************************************
// @HEADER

#include "Tpetra_Details_DistributorActor.hpp"

namespace Tpetra::Details {

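// Construct an actor that uses the default MPI tag.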
DistributorActor::DistributorActor()
  : mpiTag_(DEFAULT_MPI_TAG) {}

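// Block until all outstanding communication completes: first the receives
// (including any pending Ialltofewv collective), then the sends.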
void DistributorActor::doWaits(const DistributorPlan& plan) {
  doWaitsRecv(plan);
  doWaitsSend(plan);
}

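// Wait on all outstanding nonblocking receives and reset the receive request
// list. Also completes any pending Ialltofewv collective.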
void DistributorActor::doWaitsRecv(const DistributorPlan& plan) {
  if (requestsRecv_.size() > 0) {
    ProfilingRegion wr("Tpetra::Distributor::doWaitsRecv");

    Teuchos::waitAll(*plan.getComm(), requestsRecv_());

    // Restore the invariant that requestsRecv_.size() is the number of
    // outstanding nonblocking receive requests.
    requestsRecv_.resize(0);
  }

  doWaitsIalltofewv(plan);
}

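// Wait on all outstanding nonblocking sends and reset the send request list.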
void DistributorActor::doWaitsSend(const DistributorPlan& plan) {
  if (requestsSend_.size() > 0) {
    ProfilingRegion ws("Tpetra::Distributor::doWaitsSend");

    Teuchos::waitAll(*plan.getComm(), requestsSend_());

    // Restore the invariant that requestsSend_.size() is the number of
    // outstanding nonblocking send requests.
    requestsSend_.resize(0);
  }
}

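// If an Ialltofewv collective is in flight, wait for it to complete and
// release its count/displacement buffers and bookkeeping. No-op when Tpetra
// is built without MPI.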
void DistributorActor::doWaitsIalltofewv(const DistributorPlan& plan) {
#ifdef HAVE_TPETRA_MPI
  if (ialltofewv_.req) {
    ProfilingRegion ws("Tpetra::Distributor::doWaitsIalltofewv");
    ialltofewv_.impl.wait(*ialltofewv_.req);

    ialltofewv_.sendcounts.reset();
    ialltofewv_.sdispls.reset();
    ialltofewv_.recvcounts.reset();
    ialltofewv_.rdispls.reset();
    ialltofewv_.req = std::nullopt;
    ialltofewv_.roots.clear();
  }
#endif
}

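// Return true if all outstanding receive and send requests, and any pending
// Ialltofewv collective, report as complete; false otherwise.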
bool DistributorActor::isReady() const {
  bool result = true;
  for (auto& request : requestsRecv_) {
    result &= request->isReady();
  }
  for (auto& request : requestsSend_) {
    result &= request->isReady();
  }

  // isReady() just calls MPI_Test and returns flag != 0.
  // Don't use test for the Ialltofewv request: it is for a collective, and
  // not all ranks may call test, so progress may not be possible.
#ifdef HAVE_TPETRA_MPI
  if (ialltofewv_.req) {
    int flag;
    ialltofewv_.impl.get_status(*ialltofewv_.req, &flag, MPI_STATUS_IGNORE);
    result &= (flag != 0);
  }
#endif

  return result;
}
} // namespace Tpetra::Details
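
/*
  Usage sketch, not part of this translation unit: assuming a DistributorPlan
  `plan` built elsewhere and messages already posted through the actor's
  posting interface declared in Tpetra_Details_DistributorActor.hpp, a caller
  finishes an exchange roughly like this:

    Tpetra::Details::DistributorActor actor;
    // ... post nonblocking sends and receives through the actor ...
    actor.doWaits(plan);      // block until every outstanding request completes
    assert(actor.isReady());  // all requests have now been retired
*/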