Tpetra_Details_DistributorActor.cpp
// @HEADER
// *****************************************************************************
// Tpetra: Templated Linear Algebra Services Package
//
// Copyright 2008 NTESS and the Tpetra contributors.
// SPDX-License-Identifier: BSD-3-Clause
// *****************************************************************************
// @HEADER

#include "Tpetra_Details_DistributorActor.hpp"

namespace Tpetra::Details {

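// Construct with the default MPI tag used to match this actor's
// point-to-point messages.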
DistributorActor::DistributorActor()
    : mpiTag_(DEFAULT_MPI_TAG) {}

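// Block until all posted receives and sends have completed.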
void DistributorActor::doWaits(const DistributorPlan& plan) {
  doWaitsRecv(plan);
  doWaitsSend(plan);
}

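// Wait on all outstanding receive requests, including any pending
// Ialltofewv collective.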
void DistributorActor::doWaitsRecv(const DistributorPlan& plan) {
  if (requestsRecv_.size() > 0) {
    ProfilingRegion wr("Tpetra::Distributor::doWaitsRecv");

    Teuchos::waitAll(*plan.getComm(), requestsRecv_());

    // Restore the invariant that requestsRecv_.size() is the number of
    // outstanding nonblocking receive requests.
    requestsRecv_.resize(0);
  }

  doWaitsIalltofewv(plan);
}

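// Wait on all outstanding send requests.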
void DistributorActor::doWaitsSend(const DistributorPlan& plan) {
  if (requestsSend_.size() > 0) {
    ProfilingRegion ws("Tpetra::Distributor::doWaitsSend");

    Teuchos::waitAll(*plan.getComm(), requestsSend_());

    // Restore the invariant that requestsSend_.size() is the number of
    // outstanding nonblocking send requests.
    requestsSend_.resize(0);
  }
}

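// Complete the pending Ialltofewv collective, if any. No-op unless
// Tpetra was built with MPI support.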
void DistributorActor::doWaitsIalltofewv(const DistributorPlan& plan) {
#ifdef HAVE_TPETRA_MPI
  if (ialltofewv_.req) {
    ProfilingRegion ws("Tpetra::Distributor::doWaitsIalltofewv");
    ialltofewv_.impl.wait(*ialltofewv_.req);

    // The collective is complete; release the count/displacement
    // buffers and reset the request and root list for the next round.
    ialltofewv_.sendcounts.reset();
    ialltofewv_.sdispls.reset();
    ialltofewv_.recvcounts.reset();
    ialltofewv_.rdispls.reset();
    ialltofewv_.req = std::nullopt;
    ialltofewv_.roots.clear();
  }
#endif
}

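// Nonblocking check: true if every outstanding request has completed.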
bool DistributorActor::isReady() const {
  bool result = true;
  for (const auto& request : requestsRecv_) {
    result &= request->isReady();
  }
  for (const auto& request : requestsSend_) {
    result &= request->isReady();
  }

  // The isReady() calls above just invoke MPI_Test and return flag != 0.
  // For the Ialltofewv request below, query with get_status instead of
  // test: the request is for a collective, and not all ranks may call
  // isReady, so a test that had to make progress might never report
  // completion.
#ifdef HAVE_TPETRA_MPI
  if (ialltofewv_.req) {
    int flag;
    ialltofewv_.impl.get_status(*ialltofewv_.req, &flag, MPI_STATUS_IGNORE);
    result &= (flag != 0);
  }
#endif

  return result;
}

}  // namespace Tpetra::Details
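
For orientation, a minimal sketch of the intended call pattern, assuming a
DistributorPlan `plan` and caller-prepared packed buffers `exports` and
`imports` (the doPosts call shown is illustrative; see
Tpetra_Details_DistributorActor.hpp for the actual overloads):

  Tpetra::Details::DistributorActor actor;
  actor.doPosts(plan, exports, numPackets, imports);  // start the nonblocking exchange

  while (!actor.isReady()) {  // poll without blocking...
    // ...so local computation can overlap communication
  }
  actor.doWaits(plan);  // then block until all receives and sends finish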