fpmas 1.5
ring_graph_builder.h
Go to the documentation of this file.
1#ifndef FPMAS_RING_GRAPH_BUILDER_H
2#define FPMAS_RING_GRAPH_BUILDER_H
3
12
13#include <list>
14
15namespace fpmas { namespace graph {
/**
 * Type of ring graph to build.
 */
enum RingType {
	/**
	 * Links are only built in one direction: the forward loop.
	 * (NOTE(review): this enumerator was lost in extraction and is
	 * reconstructed from the documentation index, which places LOOP
	 * before CYCLE in this enum.)
	 */
	LOOP,
	/**
	 * Links are built in both directions: a backward loop is built in
	 * addition to the forward loop (see the `ring_type == CYCLE` branch
	 * of RingGraphBuilder::build()).
	 */
	CYCLE
};
29
54 template<typename T>
56 private:
57 std::size_t k;
58 RingType ring_type;
59
60 template<typename LocalNodeIterator>
61 std::vector<api::graph::DistributedNode<T>*> build_distant_nodes(
62 // Graph in which distant nodes are inserted
64 // Used to build distant nodes
66 // Number of nodes available on each processes
67 const std::vector<std::size_t>& node_counts,
68 // An iterator to the first nodes that should be send
69 // to each processes, depending on the current loop
70 // direction.
71 LocalNodeIterator local_nodes_iterator,
72 // returns the rank of the process at `distance` from
73 // the current process, depending on the current loop
74 // direction.
75 std::function<int (int /*level*/)> next_rank
76 );
77 template<typename NodeIterator>
78 void link_loop(
80 NodeIterator local_node_iterator,
81 std::size_t local_node_count,
82 NodeIterator link_node_iterator,
84 );
85 public:
95 RingGraphBuilder(std::size_t k, RingType ring_type=CYCLE)
96 : k(k), ring_type(ring_type) {
97 }
98
99 std::vector<api::graph::DistributedNode<T>*> build(
102 api::graph::DistributedGraph<T>& graph) override;
103 };
104
105 template<typename T> template <typename LocalNodeIterator>
106 std::vector<api::graph::DistributedNode<T>*> RingGraphBuilder<T>
107 ::build_distant_nodes(
110 const std::vector<std::size_t>& node_counts,
111 LocalNodeIterator local_nodes_iterator,
112 std::function<int (int)> next_rank
113 ) {
114 // MPI set up
116 graph.getMpiCommunicator()
117 );
119 graph.getMpiCommunicator()
120 );
121
122 std::unordered_map<int, std::size_t> node_count_requests;
123 std::unordered_map<int, std::list<DistributedId>> ids_requests;
124
125 /*
126 * Feeds the node_count_requests to know how many distant nodes
127 * must be imported from each process.
128 * The objective is to found enough nodes so that all the local
129 * nodes on the current process can be linked to k nodes.
130 */
131 // Count of found nodes
132 std::size_t count = 0;
133 // Distance from the inspected process
134 int level = 1;
135 // List of ranks from which nodes will be imported, ordered from
136 // the closest to the furthest. Notice that the current rank might
137 // be contained at the end of the list. (this is for example
138 // required for N=4 and k=3 on 2 processes)
139 std::vector<int> ranks;
140 while(count < k) {
141 // Inspect the process at a distance `level` from the current
142 // process. Depending on the current loop orientation,
143 // `_next_rank` might be before or after the current process.
144 int _next_rank = next_rank(level);
145 // Computes the number of nodes that will be imported from
146 // next_rank
147 std::size_t node_count = std::min(
148 // Imports only the exact node count required, so that
149 // at the end of the while loop count == k
150 node_counts[_next_rank], k-count
151 );
152 node_count_requests[_next_rank] = node_count;
153 count+=node_count;
154 level++;
155 ranks.push_back(_next_rank);
156 }
157
158 node_count_requests = int_mpi.allToAll(node_count_requests);
159
160 // Respond to other processes requests
161 for(auto item : node_count_requests) {
162 // Depending on the current loop orientation, `it` will
163 // correspond to the begin or end of local nodes
164 auto it = local_nodes_iterator;
165
166 // In any case, the `item.second` nodes away from
167 // `local_nodes_iterator` must be sent to the requesting
168 // process.
169 std::list<DistributedId>& ids = ids_requests[item.first];
170 for(std::size_t i = 0; i < item.second; i++)
171 // it++ goes forward or backward, depending on the
172 // specified iterator
173 ids.push_back((*it++)->getId());
174 }
175
176 ids_requests = list_id_mpi.allToAll(ids_requests);
177
178 // Builds all the required distant nodes
179 std::vector<api::graph::DistributedNode<T>*> built_nodes;
180 for(auto rank : ranks)
181 for(auto node_id : ids_requests[rank])
182 built_nodes.push_back(
183 node_builder.buildDistantNode(node_id, rank, graph));
184
185 return built_nodes;
186 }
187
188 template<typename T>
189 template<typename NodeIterator>
190 void RingGraphBuilder<T>::link_loop(
191 api::graph::DistributedGraph<T>& graph,
192 NodeIterator local_node_iterator,
193 std::size_t local_node_count,
194 NodeIterator link_node_iterator,
196 ) {
197 // Create links between nodes to build a loop
198 //
199 // At this point, the link_node list contains exactly k distant
200 // nodes in addition to the local_node list. Depending of the
201 // current orientation of the built loop, the k additional distant
202 // nodes might be at the beginning or at the end of the link_node
203 // list, but this is not important here: the algorithm starts from
204 // the provided `local_node_iterator`, and links each of the
205 // `local_node_count` local node to its k nearest neighbors in the
206 // loop. The last local nodes will notably be linked to the last k
207 // distant nodes of the link_node list.
208 NodeIterator local_node = local_node_iterator;
209 for(std::size_t n = 0; n < local_node_count; n++) {
210 NodeIterator link_node = link_node_iterator;
211 // Puts the `link_node` iterator at the same position as
212 // `local_node` in the local node list
213 std::advance(link_node, n);
214 // Links the local_node to the k next nodes in the link node
215 // list
216 for(std::size_t i = 0; i < k; i++) {
217 // Selects the next link node
218 link_node++;
219 // Links the local node to the link node
220 graph.link(*local_node, *link_node, layer);
221 }
222 local_node++;
223 }
224
225 graph.synchronizationMode().getSyncLinker().synchronize();
226 }
227
228 template<typename T>
229 std::vector<api::graph::DistributedNode<T>*> RingGraphBuilder<T>::build(
233 // Builds local nodes
234 std::list<api::graph::DistributedNode<T>*> local_nodes;
235 while(node_builder.localNodeCount() > 0)
236 local_nodes.push_back(node_builder.buildNode(graph));
237
238 // MPI related set up
239 int rank = graph.getMpiCommunicator().getRank();
240 int size = graph.getMpiCommunicator().getSize();
241
243 graph.getMpiCommunicator());
244
245 // Gathers the total node counts on each processes
246 std::vector<std::size_t> node_counts = int_mpi.allGather(local_nodes.size());
247
248 {
249 /*
250 * Forward loop
251 */
252
253 std::list<api::graph::DistributedNode<T>*> link_nodes = local_nodes;
254
255 auto distant_nodes = build_distant_nodes(
256 graph, node_builder,
257 node_counts,
258 // The BEGINNING of the local nodes will be sent to the
259 // PREVIOUS processes
260 local_nodes.begin(),
261 [rank, size] (int level) -> int {
262 // Nodes are requested to processes AFTER the
263 // current process
264 return (rank+level)%size;
265 }
266 );
267
268 // The BEGIN nodes are imported from the NEXT processes: they
269 // correspond to the nodes that will be link to the END nodes
270 // of the current process, so they are added at the end of the
271 // link_nodes list.
272 for(auto node : distant_nodes)
273 link_nodes.push_back(node);
274
275 // Links forward loop: the local node list is crossed from
276 // begin to end
277 link_loop(
278 graph,
279 local_nodes.begin(), local_nodes.size(),
280 link_nodes.begin(), layer
281 );
282 } if(ring_type == CYCLE) {
283 /*
284 * Backward loop
285 */
286 std::list<api::graph::DistributedNode<T>*> link_nodes = local_nodes;
287
288 auto distant_nodes = build_distant_nodes(
289 graph, node_builder,
290 node_counts,
291 // The ENDING of the local nodes will be sent to the
292 // NEXT processes
293 local_nodes.rbegin(),
294 [rank, size] (int current_level) -> int {
295 // Nodes are requested to processes BEFORE the
296 // current process
297 return (rank+size-current_level)%size;
298 }
299 );
300
301 // The END nodes are imported from the PREVIOUS processes: they
302 // correspond to the nodes that will be linked to the BEGIN
303 // nodes of the current process, so they are added at beginning
304 // of the link_nodes list.
305 for(auto node : distant_nodes)
306 link_nodes.push_front(node);
307
308 // Links backward loop: the local node list is crossed from end
309 // to begin
310 link_loop(
311 graph,
312 local_nodes.rbegin(), local_nodes.size(),
313 link_nodes.rbegin(), layer
314 );
315 }
316
317 return {local_nodes.begin(), local_nodes.end()};
318 }
319}}
320#endif
Definition: graph_builder.h:154
Definition: distributed_graph.h:169
virtual api::communication::MpiCommunicator & getMpiCommunicator()=0
Definition: graph_builder.h:89
virtual std::size_t localNodeCount()=0
virtual DistributedNode< T > * buildDistantNode(DistributedId id, int location, DistributedGraph< T > &graph)=0
virtual DistributedNode< T > * buildNode(DistributedGraph< T > &graph)=0
std::vector< T > allGather(const T &) override
Definition: communication.h:492
Definition: ring_graph_builder.h:55
RingGraphBuilder(std::size_t k, RingType ring_type=CYCLE)
Definition: ring_graph_builder.h:95
std::vector< api::graph::DistributedNode< T > * > build(api::graph::DistributedNodeBuilder< T > &node_builder, api::graph::LayerId layer, api::graph::DistributedGraph< T > &graph) override
Definition: ring_graph_builder.h:229
int LayerId
Definition: edge.h:13
RingType
Definition: ring_graph_builder.h:19
@ LOOP
Definition: ring_graph_builder.h:23
@ CYCLE
Definition: ring_graph_builder.h:27
Definition: fpmas.cpp:3
Definition: communication.h:590