Rheolef  7.1
an efficient C++ finite element environment
mpi_scatter_begin.h
Go to the documentation of this file.
1 #ifndef _RHEO_MPI_SCATTER_BEGIN_H
2 #define _RHEO_MPI_SCATTER_BEGIN_H
23 
24 # include "rheolef/msg_right_permutation_apply.h"
25 # include "rheolef/msg_both_permutation_apply.h"
26 
27 namespace rheolef {
28 /*F:
29 NAME: mpi_scatter_begin -- gather/scatter initialize (@PACKAGE@ @VERSION@)
30 DESCRIPTION:
31  Begin communication
32  for distributed to sequential scatter context.
33 COMPLEXITY:
34 IMPLEMENTATION:
35  Even though the next routines are written with distributed
36  vectors, either x or y (but not both) may be sequential
37  vectors, one for each processor.
38 
39  from indices indicate where arriving stuff is stashed
40  to indices indicate where departing stuff came from.
41  the naming can be a little confusing.
42 
43  Reverse scatter is obtained by swapping (to,from)
44  in calls to scatter_begin and scatter_end.
45  Reverse scatter is useful for transpose matrix-vector multiply.
46 
47  Scatter general operation is handled by a template "SetOp" type.
48  The special case "SetOp=set_op" of an assignment is treated
49  separately, taking care of the local scatter assignment.
50  Thus, the "mpi_scatter_begin" routine is split into
51  "msg_scatter_begin_global" and "msg_scatter_begin_local"
52  parts.
53 AUTHORS:
54  LMC-IMAG, 38041 Grenoble cedex 9, France
55  | Pierre.Saramito@imag.fr
56 DATE: 23 March 1999
57 END:
58 */
59 
60 //<mpi_scatter_begin:
61 template <
62  class InputIterator,
63  class Message,
64  class Tag,
65  class Comm>
66 void
68  InputIterator x,
69  Message& from,
70  Message& to,
71  Tag tag,
72  Comm comm)
73 {
74  typedef typename Message::size_type size_type;
75  // ----------------------------------------------------------
76  // 1) post receives
77  // ----------------------------------------------------------
78  from.requests.clear();
79  {
80  size_type n_receive = from.starts().size() - 1;
81  size_type i_start = 0;
82  for (size_type i = 0; i < n_receive; i++) {
83  size_type i_size = from.starts() [i+1] - from.starts() [i];
84  mpi::request i_req = comm.irecv(
85  from.procs() [i],
86  tag,
87  from.values().begin().operator->() + i_start,
88  i_size);
89  i_start += i_size;
90  from.requests.push_back (std::make_pair(i, i_req));
91  }
92  } // end block
93  // ----------------------------------------------------------
94  // 2) apply right permutation
95  // ----------------------------------------------------------
96  to.load_values (x);
97 
98  // ----------------------------------------------------------
99  // 3) do sends
100  // ----------------------------------------------------------
101  to.requests.clear();
102  {
103  size_type n_send = to.starts().size() - 1;
104  size_type i_start = 0;
105  for (size_type i = 0; i < n_send; i++) {
106  size_type i_size = to.starts() [i+1] - to.starts() [i];
107  mpi::request i_req = comm.isend(
108  to.procs() [i],
109  tag,
110  to.values().begin().operator->() + i_start,
111  i_size);
112  i_start += i_size;
113  to.requests.push_back (std::make_pair(i, i_req));
114  }
115  } // end block
116 }
/// @brief Local (intra-process) part of the scatter, general SetOp case.
///
/// Applies the combining operation op element-wise from the local slots of
/// x into the local slots of y, using both permutation tables.  The whole
/// body is currently disabled (TO_CLEAN): local messages are no longer
/// supported and mpi_scatter_begin aborts before reaching this function.
///
/// @param x    input iterator over local source values
/// @param y    output iterator over local destination values
/// @param from message descriptor holding the destination slot indices
/// @param to   message descriptor holding the source slot indices
/// @param op   combining operation, e.g. assignment or accumulation
template <
  class InputIterator,
  class OutputIterator,
  class SetOp,
  class Message>
void
mpi_scatter_begin_local (
  InputIterator  x,
  OutputIterator y,
  Message&       from,
  Message&       to,
  SetOp          op)
{
#ifdef TO_CLEAN
  msg_both_permutation_apply (
    to.local_slots.begin(),
    to.local_slots.end(),
    x,
    op,
    from.local_slots.begin(),
    y);
#endif // TO_CLEAN
}
140 // take care of local insert: template specialisation
141 template <
142  class InputIterator,
143  class OutputIterator,
144  class Message>
145 void
147  InputIterator x,
148  OutputIterator y,
149  Message& from,
150  Message& to,
152 {
153 #ifdef TO_CLEAN
154  // used when x & y have distinct pointer types (multi-valued)
155  if (y == x && ! to.local_nonmatching_computed) {
156  // scatter_local_optimize(to,from);
157  fatal_macro ("y == x: adress matches in scatter: not yet -- sorry");
158  }
159  if (to.local_is_copy) {
160 
161  std::copy(x + to.local_copy_start,
162  x + to.local_copy_start + to.local_copy_length,
163  y + from.local_copy_start);
164 
165  } else if (y != x || ! to.local_nonmatching_computed) {
166 
168  to.local_slots.begin(),
169  to.local_slots.end(),
170  x,
171  op,
172  from.local_slots.begin(),
173  y);
174 
175  } else { // !to.local_is_copy && y == x && to.local_nonmatching_computed
176 
178  to.local_slots_nonmatching.begin(),
179  to.local_slots_nonmatching.end(),
180  x,
181  op,
182  from.local_slots_nonmatching.begin(),
183  y);
184  }
185 #endif // TO_CLEAN
186 }
187 template <
188  class InputIterator,
189  class OutputIterator,
190  class Message,
191  class SetOp,
192  class Tag,
193  class Comm>
194 inline
195 void
197  InputIterator x,
198  OutputIterator y,
199  Message& from,
200  Message& to,
201  SetOp op,
202  Tag tag,
203  Comm comm)
204 {
205  mpi_scatter_begin_global (x, from, to, tag, comm);
206  if (to.n_local() == 0) {
207  return;
208  }
209  error_macro ("local messages: no more supported");
210  mpi_scatter_begin_local (x, y, from, to, op);
211 }
212 //>mpi_scatter_begin:
213 } // namespace rheolef
214 #endif // _RHEO_MPI_SCATTER_BEGIN_H
field::size_type size_type
Definition: branch.cc:425
size_t size_type
Definition: basis_get.cc:76
#define error_macro(message)
Definition: dis_macros.h:49
#define fatal_macro(message)
Definition: dis_macros.h:33
This file is part of Rheolef.
void mpi_scatter_begin_local(InputIterator x, OutputIterator y, Message &from, Message &to, SetOp op)
void mpi_scatter_begin_global(InputIterator x, Message &from, Message &to, Tag tag, Comm comm)
void msg_both_permutation_apply(InputIterator1 px, InputIterator1 last_px, InputRandomIterator x, SetOp set_op, InputIterator2 py, OutputRandomIterator y)
void mpi_scatter_begin(InputIterator x, OutputIterator y, Message &from, Message &to, SetOp op, Tag tag, Comm comm)