/*
 * MPI_IBcastW.hpp
 *
 *  Created on: Apr 8, 2017
 *      Author: i-bird
 */

#ifndef OPENFPM_VCLUSTER_SRC_MPI_WRAPPER_MPI_IBCASTW_HPP_
#define OPENFPM_VCLUSTER_SRC_MPI_WRAPPER_MPI_IBCASTW_HPP_

#include <mpi.h>

/*! \brief Set of wrapping classes for MPI_Ibcast
 *
 * The purpose of these classes is to select the right MPI_Ibcast call
 * based on the type of the data we want to broadcast
 *
 */

/*! \brief General broadcast for a raw byte buffer
 *
 * \param proc root processor of the broadcast
 * \param buf buffer containing the data on the root and where the data is stored on the other processors
 * \param sz size in bytes to broadcast
 * \param req MPI request
 *
 */

class MPI_IBcastWB
{
public:
	static inline void bcast(size_t proc, void * buf, size_t sz, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Ibcast(buf, sz, MPI_BYTE, proc, MPI_COMM_WORLD, &req));
	}
};
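
/*! \brief Example usage of MPI_IBcastWB (illustrative sketch, not part of the wrapper itself)
 *
 * A minimal sketch, assuming MPI has already been initialized (for example by
 * the Vcluster runtime): the root rank broadcasts a raw memory region as bytes
 * and every rank waits for the request to complete. The struct \c boundary is
 * a hypothetical example type.
 *
 * \code
 * struct boundary {double low; double high;};
 *
 * boundary b = {0.0,1.0};
 * MPI_Request req;
 *
 * // rank 0 broadcasts the raw bytes of b to all the other ranks
 * MPI_IBcastWB::bcast(0,&b,sizeof(boundary),req);
 * MPI_Wait(&req,MPI_STATUS_IGNORE);
 * \endcode
 *
 */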

/*! \brief General broadcast for a vector of any type
 *
 * \tparam T type of the vector elements, broadcast as raw bytes
 *
 */

template<typename T> class MPI_IBcastW
{
public:
	template<typename Memory> static inline void bcast(size_t proc, openfpm::vector<T,Memory> & v, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size() * sizeof(T), MPI_BYTE, proc, MPI_COMM_WORLD, &req));
	}
};
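
/*! \brief Example usage of MPI_IBcastW (illustrative sketch, not part of the wrapper itself)
 *
 * A minimal sketch of how the typed wrappers are typically driven: every rank
 * resizes the vector to the same size, the non-blocking broadcast is posted
 * through the wrapper, and the request is completed with MPI_Wait. MPI
 * initialization is assumed to happen elsewhere; the root rank 0 and the
 * vector size are arbitrary example values.
 *
 * \code
 * openfpm::vector<double> v;
 * v.resize(16);                          // same size on every rank
 *
 * MPI_Request req;
 * MPI_IBcastW<double>::bcast(0,v,req);   // rank 0 is the root of the broadcast
 * MPI_Wait(&req,MPI_STATUS_IGNORE);      // v now holds the data of rank 0 everywhere
 * \endcode
 *
 */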

/*! \brief specialization for vector of integer
 *
 */
template<> class MPI_IBcastW<int>
{
public:
	static inline void bcast(size_t proc, openfpm::vector<int> & v, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(), MPI_INT, proc, MPI_COMM_WORLD, &req));
	}
};

/*! \brief specialization for unsigned integer
 *
 */
template<> class MPI_IBcastW<unsigned int>
{
public:
	static inline void bcast(size_t proc, openfpm::vector<unsigned int> & v, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(), MPI_UNSIGNED, proc, MPI_COMM_WORLD, &req));
	}
};

/*! \brief specialization for short
 *
 */
template<> class MPI_IBcastW<short>
{
public:
	static inline void bcast(size_t proc, openfpm::vector<short> & v, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(), MPI_SHORT, proc, MPI_COMM_WORLD, &req));
	}
};

/*! \brief specialization for unsigned short
 *
 */
template<> class MPI_IBcastW<unsigned short>
{
public:
	static inline void bcast(size_t proc, openfpm::vector<unsigned short> & v, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(), MPI_UNSIGNED_SHORT, proc, MPI_COMM_WORLD, &req));
	}
};

/*! \brief specialization for char
 *
 */
template<> class MPI_IBcastW<char>
{
public:
	static inline void bcast(size_t proc, openfpm::vector<char> & v, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(), MPI_CHAR, proc, MPI_COMM_WORLD, &req));
	}
};

/*! \brief specialization for unsigned char
 *
 */
template<> class MPI_IBcastW<unsigned char>
{
public:
	static inline void bcast(size_t proc, openfpm::vector<unsigned char> & v, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(), MPI_UNSIGNED_CHAR, proc, MPI_COMM_WORLD, &req));
	}
};

/*! \brief specialization for size_t
 *
 */
template<> class MPI_IBcastW<size_t>
{
public:
	static inline void bcast(size_t proc, openfpm::vector<size_t> & v, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(), MPI_UNSIGNED_LONG, proc, MPI_COMM_WORLD, &req));
	}
};

/*! \brief specialization for long int
 *
 */
template<> class MPI_IBcastW<long int>
{
public:
	static inline void bcast(size_t proc, openfpm::vector<long int> & v, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(), MPI_LONG, proc, MPI_COMM_WORLD, &req));
	}
};

/*! \brief specialization for float
 *
 */
template<> class MPI_IBcastW<float>
{
public:
	static inline void bcast(size_t proc, openfpm::vector<float> & v, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(), MPI_FLOAT, proc, MPI_COMM_WORLD, &req));
	}
};

/*! \brief specialization for double
 *
 */
template<> class MPI_IBcastW<double>
{
public:
	static inline void bcast(size_t proc, openfpm::vector<double> & v, MPI_Request & req)
	{
		MPI_SAFE_CALL(MPI_Ibcast(v.getPointer(), v.size(), MPI_DOUBLE, proc, MPI_COMM_WORLD, &req));
	}
};

/*! \brief this class is a functor for the "for_each" algorithm
 *
 * This class is a functor for the "for_each" algorithm. For each
 * property of the boost::mpl sequence the operator() is called.
 * It is mainly used to post one broadcast request for each property buffer
 *
 */
template<typename vect>
struct bcast_inte_impl
{
	//! vector to broadcast
	vect & send;

	//! vector of requests
	openfpm::vector<MPI_Request> & req;

	//! root processor
	size_t root;

	/*! \brief constructor
	 *
	 * \param send vector of buffers to broadcast
	 * \param req vector of requests
	 * \param root root processor of the broadcast
	 *
	 */
	inline bcast_inte_impl(vect & send,
	                       openfpm::vector<MPI_Request> & req,
	                       size_t root)
	:send(send),req(req),root(root)
	{};

	//! It calls the broadcast function for each property
	template<typename T>
	inline void operator()(T& t)
	{
		typedef typename boost::mpl::at<typename vect::value_type::type,T>::type send_type;

		// Create one request
		req.add();

		// broadcast the buffer of the property T as raw bytes
		MPI_IBcastWB::bcast(root,&send.template get<T::value>(0),send.size()*sizeof(send_type),req.last());
	}
};

template<bool is_lin_or_inte>
struct b_cast_helper
{
	template<typename T, typename Mem, template<typename> class layout_base >
	static void bcast_(openfpm::vector<MPI_Request> & req,
	                   openfpm::vector<T,Mem,layout_base> & v,
	                   size_t root)
	{
		// Create one request
		req.add();

		// broadcast the whole vector with a single MPI_Ibcast
		MPI_IBcastW<T>::bcast(root,v,req.last());
	}
};

template<>
struct b_cast_helper<false>
{
	template<typename T, typename Mem, template<typename> class layout_base >
	static void bcast_(openfpm::vector<MPI_Request> & req,
	                   openfpm::vector<T,Mem,layout_base> & v,
	                   size_t root)
	{
		bcast_inte_impl<openfpm::vector<T,Mem,layout_base>> bc(v,req,root);

		boost::mpl::for_each_ref<boost::mpl::range_c<int,0,T::max_prop>>(bc);
	}
};
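
/*! \brief Example usage of b_cast_helper (illustrative sketch, not part of the wrapper itself)
 *
 * A minimal sketch, assuming the caller (normally the Vcluster broadcast code)
 * already knows whether the vector layout is linear or interleaved: the linear
 * path (true, primary template above) produces a single request for the whole
 * buffer, while the false specialization posts one request per property. The
 * vector type and the root rank 0 are arbitrary example values, and it is
 * assumed that openfpm::vector<float> matches the three template parameters
 * expected by bcast_.
 *
 * \code
 * openfpm::vector<MPI_Request> reqs;
 * openfpm::vector<float> v;
 * v.resize(64);                          // same size on every rank
 *
 * // linear layout: broadcast the whole buffer with one MPI_Ibcast
 * b_cast_helper<true>::bcast_(reqs,v,0);
 *
 * // complete all the pending broadcasts
 * MPI_Waitall(reqs.size(),&reqs.get(0),MPI_STATUSES_IGNORE);
 * \endcode
 *
 */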

#endif /* OPENFPM_VCLUSTER_SRC_MPI_WRAPPER_MPI_IBCASTW_HPP_ */