1 | /* |
2 | * grid_common.hpp |
3 | * |
4 | * Created on: Oct 31, 2015 |
5 | * Author: i-bird |
6 | */ |
7 | |
8 | #ifndef OPENFPM_DATA_SRC_GRID_GRID_COMMON_HPP_ |
9 | #define OPENFPM_DATA_SRC_GRID_GRID_COMMON_HPP_ |
10 | |
11 | #include <type_traits> |
12 | #include "util/tokernel_transformation.hpp" |
13 | |
/*! \brief this class is a functor for "for_each" algorithm
 *
 * This class is a functor for "for_each" algorithm. For each
 * element of the boost::vector the operator() is called.
 * Is mainly used to set the background value on each property
 * of every local grid
 *
 */
template<typename aggrT_src, typename local_grids_type>
struct setBackground_impl
{
	//! aggregate carrying the background value for every property
	aggrT_src & bck;

	//! set of local grids on which the background value has to be set
	local_grids_type & loc_grid;

	/*! \brief constructor
	 *
	 * \param background aggregate with the background values
	 * \param grids local grids to update
	 *
	 */
	inline setBackground_impl(aggrT_src & background, local_grids_type & grids)
	:bck(background),loc_grid(grids)
	{};

	//! Set the background value of property T::value on every local grid
	template<typename T>
	inline void operator()(T& t)
	{
		for (size_t g = 0 ; g < loc_grid.size() ; ++g)
		{loc_grid.get(g).template setBackgroundValue<T::value>(bck.template get<T::value>());}
	}
};
42 | |
/*! \brief this class is a functor for "for_each" algorithm
 *
 * This class is a functor for "for_each" algorithm. For each
 * element of the boost::vector the operator() is called.
 * Is mainly used to call hostToDevice for each property
 *
 */
template<typename Tv>
struct host_to_dev_all_prp
{
	//! object whose properties are transferred to the device
	Tv & p;

	/*! \brief constructor
	 *
	 * \param obj object to transfer to the device
	 *
	 */
	inline host_to_dev_all_prp(Tv & obj)
	:p(obj)
	{};

	//! Transfer property T::value of the stored object from host to device
	template<typename T>
	inline void operator()(T& t) const
	{
		p.template hostToDevice<T::value>();
	}
};
66 | |
67 | |
template<typename T, typename T_ker, typename type_prp, template<typename> class layout_base , int is_vector>
struct call_recursive_host_device_if_vector
{
	/*! \brief Convert the host objects into their kernel representation and transfer them
	 *
	 * The host type T and the device type T_ker do not match in general, so each
	 * element in the range is converted with toKernel() into a temporary buffer
	 * that is then transferred to the device.
	 *
	 * \param mem memory object that holds the buffer and performs the transfer
	 * \param obj object that exposes the host pointer via get_pointer()
	 * \param start start of the range to convert, in bytes
	 * \param stop end of the range to convert, in bytes
	 *
	 */
	template<typename mem_type, typename obj_type> static void transform(mem_type * mem, obj_type & obj, size_t start, size_t stop)
	{
		// convert the byte range into an element range
		start /= sizeof(type_prp);
		stop /= sizeof(type_prp);

		// The type of device and the type on host does not match (in general)
		// So we have to convert before transfer

		T * ptr = static_cast<T *>(obj.get_pointer());

		// staging buffer holding the kernel-side representation
		mem_type tmp;

		tmp.allocate(mem->size());

		T_ker * ptr_tt = static_cast<T_ker *>(tmp.getPointer());

		// placement-new + toKernel() conversion for each element in the range
		// NOTE(review): elements outside [start,stop) are left uninitialized in tmp,
		// but the whole buffer is transferred below — confirm this is intended
		for(size_t i = start ; i < stop ; i++)
		{
			new (&ptr_tt[i]) T_ker();
			ptr_tt[i] = ptr[i].toKernel();
		}

		mem->hostToDevice(tmp);
	}

	//! It is a vector: recursively call hostToDevice on every property of each element
	template<typename obj_type>
	static void call(obj_type & obj, size_t start, size_t stop)
	{
		T * ptr = static_cast<T *>(obj.get_pointer());

		// for each element in the range, transfer all its properties to the device
		for(size_t i = start ; i < stop ; i++)
		{
			host_to_dev_all_prp<T> hdap(ptr[i]);

			boost::mpl::for_each_ref<boost::mpl::range_c<int,0,T::value_type::max_prop>>(hdap);
		}
	}
};
110 | |
template<typename T, typename T_ker, typename type_prp ,template<typename> class layout_base>
struct call_recursive_host_device_if_vector<T,T_ker,type_prp,layout_base,0>
{
	//! Plain (non-vector) property: no conversion needed, the byte range
	//! [start,stop) is transferred directly
	template<typename mem_type,typename obj_type> static void transform(mem_type * mem, obj_type & obj, size_t start, size_t stop)
	{
		mem->hostToDevice(start,stop);
	}

	//! It is not a vector nothing to do
	template<typename obj_type>
	static void call(obj_type & obj, size_t start, size_t stop) {}
};
123 | |
124 | template<typename T, typename T_ker, typename type_prp ,template<typename> class layout_base> |
125 | struct call_recursive_host_device_if_vector<T,T_ker,type_prp,layout_base,3> |
126 | { |
127 | template<typename mem_type,typename obj_type> static void transform(mem_type * mem, obj_type & obj, size_t start, size_t stop) |
128 | { |
129 | // calculate the start and stop elements |
130 | start /= std::extent<type_prp,0>::value; |
131 | stop /= std::extent<type_prp,0>::value; |
132 | size_t sz = mem->size() / std::extent<type_prp,0>::value; |
133 | |
134 | size_t offset = 0; |
135 | for (size_t i = 0 ; i < std::extent<type_prp,0>::value ; i++) |
136 | { |
137 | mem->hostToDevice(offset+start,offset+stop); |
138 | offset += sz; |
139 | } |
140 | } |
141 | |
142 | //! It is not a vector nothing to do |
143 | template<typename obj_type> |
144 | static void call(obj_type & obj, size_t start, size_t stop) {} |
145 | }; |
146 | |
147 | template<typename T, typename T_ker, typename type_prp ,template<typename> class layout_base> |
148 | struct call_recursive_host_device_if_vector<T,T_ker,type_prp,layout_base,4> |
149 | { |
150 | template<typename mem_type,typename obj_type> static void transform(mem_type * mem, obj_type & obj, size_t start, size_t stop) |
151 | { |
152 | // calculate the start and stop elements |
153 | start = start / std::extent<type_prp,0>::value / std::extent<type_prp,1>::value; |
154 | stop = stop / std::extent<type_prp,0>::value / std::extent<type_prp,1>::value; |
155 | size_t sz = mem->size() / std::extent<type_prp,0>::value / std::extent<type_prp,1>::value; |
156 | |
157 | size_t offset = 0; |
158 | for (size_t i = 0 ; i < std::extent<type_prp,0>::value ; i++) |
159 | { |
160 | for (size_t j = 0 ; j < std::extent<type_prp,1>::value ; j++) |
161 | { |
162 | mem->hostToDevice(offset+start,offset+stop); |
163 | offset += sz; |
164 | } |
165 | } |
166 | } |
167 | |
168 | //! It is not a vector nothing to do |
169 | template<typename obj_type> |
170 | static void call(obj_type & obj, size_t start, size_t stop) {} |
171 | }; |
172 | |
173 | /////////// destructor |
174 | |
175 | |
template<typename T, typename T_ker, typename type_prp, template<typename> class layout_base , int is_vector>
struct call_recursive_destructor_if_vector
{
	/*! \brief Destroy the kernel-side objects stored in the buffer
	 *
	 * The buffer content is copied back with deviceToHost into a temporary
	 * host buffer, and the destructor of T_ker is invoked on every element
	 * of that host copy.
	 *
	 * \param mem memory object holding the buffer
	 * \param obj unused here
	 *
	 */
	template<typename mem_type, typename obj_type> static void destruct(mem_type * mem, obj_type & obj)
	{
		// number of T_ker elements in the buffer
		size_t sz = mem->size() / sizeof(type_prp);
		// The type of device and the type on host does not match (in general)
		// So we have to convert before transfer

		// temporary host buffer that receives the device content
		mem_type tmp;

		tmp.allocate(mem->size());

		mem->deviceToHost(tmp);
		T_ker * ptr = static_cast<T_ker *>(tmp.getPointer());

		// run the destructor on every element of the host copy
		for(size_t i = 0 ; i < sz ; i++)
		{
			ptr->~T_ker();
			++ptr;
		}
	}
};
199 | |
template<typename T, typename T_ker, typename type_prp ,template<typename> class layout_base>
struct call_recursive_destructor_if_vector<T,T_ker,type_prp,layout_base,0>
{
	//! It is not a vector: no kernel-side objects to destroy, nothing to do
	template<typename mem_type,typename obj_type> static void destruct(mem_type * mem, obj_type & obj)
	{}
};
206 | |
template<typename T, typename T_ker, typename type_prp ,template<typename> class layout_base>
struct call_recursive_destructor_if_vector<T,T_ker,type_prp,layout_base,3>
{
	//! 1D array property: no kernel-side objects to destroy, nothing to do
	template<typename mem_type,typename obj_type> static void destruct(mem_type * mem, obj_type & obj)
	{}
};
213 | |
template<typename T, typename T_ker, typename type_prp ,template<typename> class layout_base>
struct call_recursive_destructor_if_vector<T,T_ker,type_prp,layout_base,4>
{
	//! 2D array property: no kernel-side objects to destroy, nothing to do
	template<typename mem_type,typename obj_type> static void destruct(mem_type * mem, obj_type & obj)
	{}
};
220 | |
221 | /////////////////////// |
222 | |
/*! \brief this class is a functor for "for_each" algorithm
 *
 * This class is a functor for "for_each" algorithm. For each
 * element of the boost::vector the operator() is called.
 * Is mainly used to copy one object into one target
 * grid element in a generic way for a
 * generic object T with a variable number of properties
 *
 * \tparam dim Dimensionality
 * \tparam S type of grid
 * \tparam Memory type of memory needed for encap
 *
 */
236 | |
237 | template<unsigned int dim, typename S, typename Memory> |
238 | struct copy_cpu_encap |
239 | { |
240 | //! size to allocate |
241 | grid_key_dx<dim> & key; |
242 | |
243 | //! grid where we have to store the data |
244 | S & grid_dst; |
245 | |
246 | //! type of the object we have to set |
247 | typedef typename S::value_type obj_type; |
248 | |
249 | //! type of the object boost::sequence |
250 | typedef typename S::value_type::type ov_seq; |
251 | |
252 | //! object we have to store |
253 | const encapc<1,obj_type,Memory> & obj; |
254 | |
255 | /*! \brief constructor |
256 | * |
257 | * It define the copy parameters. |
258 | * |
259 | * \param key which element we are modifying |
260 | * \param grid_dst grid we are updating |
261 | * \param obj object we have to set in grid_dst (encapsulated) |
262 | * |
263 | */ |
264 | inline copy_cpu_encap(grid_key_dx<dim> & key, S & grid_dst, const encapc<1,obj_type,Memory> & obj) |
265 | :key(key),grid_dst(grid_dst),obj(obj){}; |
266 | |
267 | |
268 | #ifdef SE_CLASS1 |
269 | /*! \brief Constructor |
270 | * |
271 | * Calling this constructor produce an error. This class store the reference of the object, |
272 | * this mean that the object passed must not be a temporal object |
273 | * |
274 | */ |
275 | inline copy_cpu_encap(grid_key_dx<dim> & key, S & grid_dst, const encapc<1,obj_type,Memory> && obj) |
276 | :key(key),grid_dst(grid_dst),obj(obj) |
277 | {std::cerr << "Error: " <<__FILE__ << ":" << __LINE__ << " Passing a temporal object" ;}; |
278 | #endif |
279 | |
280 | //! It call the copy function for each property |
281 | template<typename T> |
282 | inline void operator()(T& t) const |
283 | { |
284 | // Remove the reference from the type to copy |
285 | typedef typename boost::remove_reference<decltype(grid_dst.template get<T::value>(key))>::type copy_rtype; |
286 | |
287 | meta_copy<copy_rtype>::meta_copy_(obj.template get<T::value>(),grid_dst.template get<T::value>(key)); |
288 | } |
289 | }; |
290 | |
291 | |
292 | /*! \brief Metafunction take T and return a reference |
293 | * |
294 | * Metafunction take T and return a reference |
295 | * |
296 | * \param T type |
297 | * |
298 | */ |
299 | |
template<typename T>
struct mem_reference
{
	//! T with an lvalue reference added (alias declaration instead of typedef,
	//! consistent with the C++11 features already used in this header)
	using type = T&;
};
305 | |
/*! \brief Options for remove copy
 *
 * Flags controlling the phases of the remove/copy operation
 *
 */
enum rem_copy_opt
{
	//! default: no option set
	NONE_OPT = 0,
	//! first phase — NOTE(review): shares the value 0 with NONE_OPT, presumably intentional; confirm
	PHASE1 = 0,
	//! second phase
	PHASE2 = 1,
	//! third phase
	PHASE3 = 2,
	// This option indicate that the geometrical structure of the sparse-grid has not changed
	KEEP_GEOMETRY = 4,
};
319 | |
320 | #endif /* OPENFPM_DATA_SRC_GRID_GRID_COMMON_HPP_ */ |
321 | |