1/*
2 * grid_base_implementation.hpp
3 *
4 * Created on: May 2, 2016
5 * Author: i-bird
6 */
7
8#ifndef OPENFPM_DATA_SRC_GRID_GRID_BASE_IMPLEMENTATION_HPP_
9#define OPENFPM_DATA_SRC_GRID_GRID_BASE_IMPLEMENTATION_HPP_
10
11#include "grid_base_impl_layout.hpp"
12#include "util/cuda_util.hpp"
13#include "cuda/cuda_grid_gpu_funcs.cuh"
14#include "util/create_vmpl_sequence.hpp"
15#include "util/cuda_launch.hpp"
16#include "util/object_si_di.hpp"
17
// Option flags combined into the opt argument of resize-like operations.
// NOTE(review): meanings inferred from the names — confirm against callers.
constexpr int DATA_ON_HOST = 32;
constexpr int DATA_ON_DEVICE = 64;
constexpr int EXACT_RESIZE = 128;
21
/*! \brief Decide whether per-element initialization can be skipped
 *
 * Generic case (np == false): always report that initialization
 * can be skipped.
 *
 * \tparam np whether T exposes a noPointers() query
 * \tparam T stored type
 */
template<bool np,typename T>
struct skip_init
{
	//! \return always true in the generic case
	static bool skip_()
	{
		return true;
	}
};
30
31template<typename T>
32struct skip_init<true,T>
33{
34 static bool skip_()
35 {
36 return T::noPointers();
37 }
38};
39
40#ifdef CUDA_GPU
41
//! Compute the raw 3D coordinate (int x[3]) of the calling CUDA thread,
//! offset by \p start; return from the kernel when the coordinate lies
//! outside the inclusive \p stop bound
#define GRID_ID_3_RAW(start,stop) int x[3] = {threadIdx.x + blockIdx.x * blockDim.x + start.get(0),\
						 threadIdx.y + blockIdx.y * blockDim.y + start.get(1),\
						 threadIdx.z + blockIdx.z * blockDim.z + start.get(2)};\
						 \
						 if (x[0] > stop.get(0) || x[1] > stop.get(1) || x[2] > stop.get(2))\
						 {return;}

//! Same as GRID_ID_3_RAW but introduces named scalars tx, ty, tz instead of
//! an array
#define GRID_ID_3_TRAW(start,stop) int tx = threadIdx.x + blockIdx.x * blockDim.x + start.get(0);\
						 int ty = threadIdx.y + blockIdx.y * blockDim.y + start.get(1);\
						 int tz = threadIdx.z + blockIdx.z * blockDim.z + start.get(2);\
						 \
						 if (tx > stop.get(0) || ty > stop.get(1) || tz > stop.get(2))\
						 {return;}

//! Build a 3D grid_key_dx named \c key for the calling thread from an
//! iterator-info object exposing .start / .stop; return from the kernel when
//! outside the inclusive stop bound
#define GRID_ID_3(ite_gpu) grid_key_dx<3,int> key;\
						 key.set_d(0,threadIdx.x + blockIdx.x * blockDim.x + ite_gpu.start.get(0));\
						 key.set_d(1,threadIdx.y + blockIdx.y * blockDim.y + ite_gpu.start.get(1));\
						 key.set_d(2,threadIdx.z + blockIdx.z * blockDim.z + ite_gpu.start.get(2));\
						 \
						 if (key.get(0) > ite_gpu.stop.get(0) || key.get(1) > ite_gpu.stop.get(1) || key.get(2) > ite_gpu.stop.get(2))\
						 {return;}

//! 2D variant of GRID_ID_3
#define GRID_ID_2(ite_gpu) grid_key_dx<2,int> key;\
						 key.set_d(0,threadIdx.x + blockIdx.x * blockDim.x + ite_gpu.start.get(0));\
						 key.set_d(1,threadIdx.y + blockIdx.y * blockDim.y + ite_gpu.start.get(1));\
						 \
						 if (key.get(0) > ite_gpu.stop.get(0) || key.get(1) > ite_gpu.stop.get(1))\
						 {return;}
70
71#ifdef __NVCC__
72
/*! \brief Map the calling CUDA thread to a dim-dimensional grid point
 *
 * x and y come directly from the 2D block/thread decomposition; every
 * dimension from 2 upward is linearized into the z component and
 * de-linearized here using the grid sizes.
 *
 * \tparam dim dimensionality of the grid (generic case, dim > 3)
 * \tparam ids_type integral type of the key components
 */
template<unsigned int dim, typename ids_type = int>
struct grid_p
{
	/*! \brief Return the grid point processed by the calling thread
	 *
	 * \param g grid information (per-dimension sizes)
	 *
	 * \return the multi-dimensional key of this thread
	 */
	__device__ static inline grid_key_dx<dim,ids_type> get_grid_point(const grid_sm<dim,void> & g)
	{
		grid_key_dx<dim,ids_type> key;

		key.set_d(0,blockIdx.x * blockDim.x + threadIdx.x);
		key.set_d(1,blockIdx.y * blockDim.y + threadIdx.y);

		unsigned int bz = blockIdx.z * blockDim.z + threadIdx.z;
		key.set_d(2,bz % g.size(2));

		for (unsigned int i = 3 ; i < dim ; i++)
		{
			// strip the size of the PREVIOUS dimension before extracting the
			// i-th coordinate (was bz /= g.size(i): off-by-one in the running
			// divisor of the de-linearization)
			bz /= g.size(i-1);
			key.set_d(i,bz % g.size(i));
		}

		return key;
	}

	/*! \brief Same as above, taking the sizes as an openfpm::array
	 *
	 * \param g per-dimension sizes
	 *
	 * \return the multi-dimensional key of this thread
	 */
	__device__ static inline grid_key_dx<dim,ids_type> get_grid_point(const openfpm::array<ids_type,dim,unsigned int> & g)
	{
		grid_key_dx<dim,ids_type> key;

		key.set_d(0,blockIdx.x * blockDim.x + threadIdx.x);
		key.set_d(1,blockIdx.y * blockDim.y + threadIdx.y);

		unsigned int bz = blockIdx.z * blockDim.z + threadIdx.z;
		key.set_d(2,bz % g[2]);

		for (unsigned int i = 3 ; i < dim ; i++)
		{
			// see note above: divide by the previous dimension's size
			bz /= g[i-1];
			key.set_d(i,bz % g[i]);
		}

		return key;
	}
};
114
/*! \brief 3D specialization: direct mapping of the (x,y,z) thread index */
template<typename ids_type>
struct grid_p<3,ids_type>
{
	/*! \brief Return the 3D grid point processed by the calling thread
	 *
	 * \param g grid information (sizes are not needed: the point is fully
	 *          determined by the thread/block indices)
	 *
	 * \return the 3D key of this thread
	 */
	__device__ static inline grid_key_dx<3,ids_type> get_grid_point(const grid_sm<3,void> & g)
	{
		// fix: the key was declared grid_key_dx<3,unsigned int>, forcing a
		// conversion on return whenever ids_type != unsigned int; declare it
		// with ids_type like the overload below
		grid_key_dx<3,ids_type> key;

		key.set_d(0,blockIdx.x * blockDim.x + threadIdx.x);
		key.set_d(1,blockIdx.y * blockDim.y + threadIdx.y);
		key.set_d(2,blockIdx.z * blockDim.z + threadIdx.z);

		return key;
	}

	/*! \brief Same as above, taking the sizes as an openfpm::array (unused)
	 *
	 * \param g per-dimension sizes (unused)
	 *
	 * \return the 3D key of this thread
	 */
	__device__ static inline grid_key_dx<3,ids_type> get_grid_point(const openfpm::array<ids_type,3,unsigned int> & g)
	{
		grid_key_dx<3,ids_type> key;

		key.set_d(0,blockIdx.x * blockDim.x + threadIdx.x);
		key.set_d(1,blockIdx.y * blockDim.y + threadIdx.y);
		key.set_d(2,blockIdx.z * blockDim.z + threadIdx.z);

		return key;
	}
};
140
/*! \brief 2D specialization: direct mapping of the (x,y) thread index */
template<typename ids_type>
struct grid_p<2,ids_type>
{
	/*! \brief Return the 2D grid point processed by the calling thread
	 *
	 * \param g grid information (unused: the point is fully determined by
	 *          the thread/block indices)
	 *
	 * \return the 2D key of this thread
	 */
	__device__ static inline grid_key_dx<2,ids_type> get_grid_point(const grid_sm<2,void> & g)
	{
		const ids_type px = blockIdx.x * blockDim.x + threadIdx.x;
		const ids_type py = blockIdx.y * blockDim.y + threadIdx.y;

		grid_key_dx<2,ids_type> key;
		key.set_d(0,px);
		key.set_d(1,py);

		return key;
	}

	/*! \brief Same as above, taking the sizes as an openfpm::array (unused)
	 *
	 * \param g per-dimension sizes (unused)
	 *
	 * \return the 2D key of this thread
	 */
	__device__ static inline grid_key_dx<2,ids_type> get_grid_point(const openfpm::array<ids_type,2,unsigned int> & g)
	{
		const ids_type px = blockIdx.x * blockDim.x + threadIdx.x;
		const ids_type py = blockIdx.y * blockDim.y + threadIdx.y;

		grid_key_dx<2,ids_type> key;
		key.set_d(0,px);
		key.set_d(1,py);

		return key;
	}
};
164
/*! \brief 1D specialization: only the x component of the thread index is used */
template<typename ids_type>
struct grid_p<1,ids_type>
{
	/*! \brief Return the 1D grid point processed by the calling thread
	 *
	 * \param g grid information (unused)
	 *
	 * \return the 1D key of this thread
	 */
	__device__ static inline grid_key_dx<1,unsigned int> get_grid_point(const grid_sm<1,void> & g)
	{
		grid_key_dx<1,unsigned int> key;

		key.set_d(0,blockIdx.x * blockDim.x + threadIdx.x);

		return key;
	}

	/*! \brief Overload taking the sizes as an openfpm::array, added for
	 *         interface parity with the 2D and 3D specializations
	 *
	 * \param g per-dimension sizes (unused)
	 *
	 * \return the 1D key of this thread
	 */
	__device__ static inline grid_key_dx<1,ids_type> get_grid_point(const openfpm::array<ids_type,1,unsigned int> & g)
	{
		grid_key_dx<1,ids_type> key;

		key.set_d(0,blockIdx.x * blockDim.x + threadIdx.x);

		return key;
	}
};
177
178#endif
179
180template<unsigned int dim>
181bool has_work_gpu(ite_gpu<dim> & ite)
182{
183 size_t tot_work = 1;
184
185 if (dim == 1)
186 {tot_work *= ite.wthr.x * ite.thr.x;}
187 else if(dim == 2)
188 {
189 tot_work *= ite.wthr.x * ite.thr.x;
190 tot_work *= ite.wthr.y * ite.thr.y;
191 }
192 else
193 {
194 tot_work *= ite.wthr.x * ite.thr.x;
195 tot_work *= ite.wthr.y * ite.thr.y;
196 tot_work *= ite.wthr.z * ite.thr.z;
197 }
198
199 return tot_work != 0;
200}
201
202template<unsigned int dim>
203void move_work_to_blocks(ite_gpu<dim> & ite)
204{
205 if (dim == 1)
206 {
207 ite.wthr.x = ite.wthr.x * ite.thr.x;
208 ite.thr.x = 1;
209 }
210 else if(dim == 2)
211 {
212 ite.wthr.x = ite.wthr.x * ite.thr.x;
213 ite.wthr.y = ite.wthr.y * ite.thr.y;
214 ite.thr.x = 1;
215 ite.thr.y = 1;
216
217 }
218 else
219 {
220 ite.wthr.x = ite.wthr.x * ite.thr.x;
221 ite.wthr.x = ite.wthr.y * ite.thr.y;
222 ite.wthr.x = ite.wthr.z * ite.thr.z;
223 ite.thr.x = 1;
224 ite.thr.y = 1;
225 ite.thr.z = 1;
226 }
227}
228
229#endif
230
231#include "copy_grid_fast.hpp"
232
/*! \brief Fallback case of the fast grid copy dispatcher
 *
 * Selected when the template parameter is not an index_tuple_sq; no copy
 * implementation exists for this case.
 */
template<typename T>
struct copy_grid_fast_caller
{
	// prints a diagnostic and performs no copy
	template<typename grid_type>
	static void call(grid_type & gd, const grid_type & gs, const Box<grid_type::dims,size_t> & box_src, const Box<grid_type::dims,size_t> & box_dst)
	{
		std::cout << "Error: " << __FILE__ << ":" << __LINE__ << " copy_grid_fast_caller failure" << std::endl;
	}
};
242
243template<int ... prp>
244struct copy_grid_fast_caller<index_tuple_sq<prp ...>>
245{
246 template<typename grid_type>
247 static void call(grid_type & gd, const grid_type & gs, const Box<grid_type::dims,size_t> & box_src, const Box<grid_type::dims,size_t> & box_dst)
248 {
249 grid_key_dx<grid_type::dims> cnt[1];
250 cnt[0].zero();
251
252 typedef typename std::remove_reference<decltype(gd)>::type grid_cp;
253 typedef typename std::remove_reference<decltype(gd.getGrid())>::type grid_info_cp;
254
255 typedef typename to_int_sequence<0,grid_type::value_type::max_prop>::type result;
256
257 copy_grid_fast<!is_contiguos<prp...>::type::value || has_pack_gen<typename grid_type::value_type>::value,
258 grid_type::dims,
259 grid_cp,
260 grid_info_cp>::copy(gs.getGrid(),
261 gd.getGrid(),
262 box_src,
263 box_dst,
264 gs,gd,cnt);
265 }
266};
267
268/*! \brief
269 *
270 * Implementation of a N-dimensional grid
271 *
272 * \tparam dim dimansionality of the grid
273 * \tparam T type store by the grid
274 * \tparam S Memory pool from where to take the memory
275 * \tparam layout_base layout memory meta-function (the meta-function used to construct layout_)
276 *
277 */
278template<unsigned int dim,
279 typename T,
280 typename S,
281 template<typename> class layout_base,
282 typename ord_type = grid_sm<dim,void> >
283class grid_base_impl
284{
	//! memory layout (the concrete type built by the layout meta-function)
	typedef typename layout_base<T>::type layout;

public:

	//! memory layout
	typedef layout layout_type;

	//! expose the dimensionality as a static const
	static constexpr unsigned int dims = dim;

	//! Access key
	typedef grid_key_dx<dim> access_key;

	//! boost::fusion vector that describe the data type
	typedef typename T::type T_type;

	//! base layout type
	typedef layout_base<T> layout_base_;

	//! type that linearizes a grid key into a linear index
	typedef ord_type linearizer_type;

	// NOTE(review): presumably the background (empty-point) value type used
	// by sparse-grid variants of this interface — confirm against callers
	typedef T background_type;

protected:

	//! Memory layout specification + memory chunk pointer
	layout data_;

	//! This is a structure that store all information related to the grid and how indexes are linearized
	ord_type g1;

private:

	//! Is the memory initialized
	bool is_mem_init = false;

	//! The memory allocator is not internally created (set by setMemory(S&)/setMemoryArray)
	bool isExternal;
324
325#ifdef SE_CLASS1
326
	/*! \brief Check that the grid memory has been initialized
	 *
	 * Host-side only: prints an error and triggers ACTION_ON_ERROR when the
	 * grid is accessed before setMemory has been called.
	 *
	 */
	inline void check_init() const
	{
#ifndef __CUDA_ARCH__
		if (is_mem_init == false)
		{
			std::cerr << "Error " << __FILE__ << ":" << __LINE__ << " you must call SetMemory before access the grid\n";
			ACTION_ON_ERROR(GRID_ERROR_OBJECT);
		}
#endif
	}
341
	/*! \brief Check that the key is inside this grid
	 *
	 * Host-side only: reports (and triggers ACTION_ON_ERROR) any component
	 * that is negative or beyond the grid size.
	 *
	 * \param v1 key to check
	 *
	 */
	inline void check_bound(const grid_key_dx<dim> & v1) const
	{
#ifndef __CUDA_ARCH__
		for (long int i = 0 ; i < dim ; i++)
		{
			if (v1.get(i) >= (long int)getGrid().size(i))
			{
				std::cerr << "Error " __FILE__ << ":" << __LINE__ <<" grid overflow " << "x=[" << i << "]=" << v1.get(i) << " >= " << getGrid().size(i) << "\n";
				ACTION_ON_ERROR(GRID_ERROR_OBJECT);
			}
			else if (v1.get(i) < 0)
			{
				std::cerr << "Error " __FILE__ << ":" << __LINE__ <<" grid overflow " << "x=[" << i << "]=" << v1.get(i) << " is negative " << "\n";
				ACTION_ON_ERROR(GRID_ERROR_OBJECT);
			}
		}
#endif
	}
365
	/*! \brief Check that a linearized key is inside this grid
	 *
	 * Host-side only.
	 *
	 * \param v1 linearized key to check
	 *
	 */
	inline void check_bound(size_t v1) const
	{
#ifndef __CUDA_ARCH__
		if (v1 >= getGrid().size())
		{
			std::cerr << "Error " __FILE__ << ":" << __LINE__ <<" grid overflow " << v1<< " >= " << getGrid().size() << "\n";
			ACTION_ON_ERROR(GRID_ERROR_OBJECT);
		}
#endif
	}
381
	/*! \brief Check that a key is inside another grid
	 *
	 * check if key2 is inside the g grid boundary
	 *
	 * \param g grid whose bounds are checked
	 * \param key2 key to check
	 *
	 */
	template<typename Mem> inline void check_bound(const grid_base_impl<dim,T,Mem,layout_base, ord_type> & g,const grid_key_dx<dim> & key2) const
	{
#ifndef __CUDA_ARCH__
		for (size_t i = 0 ; i < dim ; i++)
		{
			if (key2.get(i) >= (long int)g.getGrid().size(i))
			{
				std::cerr << "Error " __FILE__ << ":" << __LINE__ <<" grid overflow " << "x=[" << i << "]=" << key2.get(i) << " >= " << g.getGrid().size(i) << "\n";
				ACTION_ON_ERROR(GRID_ERROR_OBJECT);
			}
			else if (key2.get(i) < 0)
			{
				std::cerr << "Error " __FILE__ << ":" << __LINE__ <<" grid overflow " << "x=[" << i << "]=" << key2.get(i) << " is negative " << "\n";
				ACTION_ON_ERROR(GRID_ERROR_OBJECT);
			}
		}
#endif
	}
408
	/*! \brief Check that a key is inside another grid (different layout)
	 *
	 * check if key2 is inside the g grid boundary; overload for a grid with
	 * a different layout meta-function
	 *
	 * \param g grid whose bounds are checked
	 * \param key2 key to check
	 *
	 */
	template<typename Mem, template <typename> class layout_base2>
	inline void check_bound(const grid_base_impl<dim,T,Mem,layout_base2,ord_type> & g,const grid_key_dx<dim> & key2) const
	{
#ifndef __CUDA_ARCH__
		for (size_t i = 0 ; i < dim ; i++)
		{
			if (key2.get(i) >= (long int)g.getGrid().size(i))
			{
				std::cerr << "Error " __FILE__ << ":" << __LINE__ <<" grid overflow " << "x=[" << i << "]=" << key2.get(i) << " >= " << g.getGrid().size(i) << "\n";
				ACTION_ON_ERROR(GRID_ERROR_OBJECT);
			}
			else if (key2.get(i) < 0)
			{
				std::cerr << "Error " __FILE__ << ":" << __LINE__ <<" grid overflow " << "x=[" << i << "]=" << key2.get(i) << " is negative " << "\n";
				ACTION_ON_ERROR(GRID_ERROR_OBJECT);
			}
		}
#endif
	}
436
437 /*! \brief Check that the key is inside the grid
438 *
439 * check if key2 is inside the g grid boundary
440 *
441 * \param g grid
442 * \param key2
443 *
444 */
445 template<typename Mem> inline void check_bound(const grid_base_impl<dim,T,Mem,layout_base> & g,const size_t & key2) const
446 {
447#ifndef __CUDA_ARCH__
448 if (key2 >= g.getGrid().size())
449 {
450 std::cerr << "Error " __FILE__ << ":" << __LINE__ <<" grid overflow " << key2 << " >= " << getGrid().size() << "\n";
451 ACTION_ON_ERROR(GRID_ERROR_OBJECT);
452 }
453#endif
454 }
455
456#endif
457
	/*! \brief Device-side copy of the overlapping region during a resize
	 *
	 * Launches a kernel that copies the intersection of the old and new grid
	 * (the smaller extent on each dimension) into grid_new. Requires this
	 * translation unit to be compiled with NVCC; otherwise it only prints an
	 * error.
	 *
	 * \param sz requested size of the new grid on each dimension
	 * \param grid_new destination grid
	 * \param blockSize when > 1, work is moved to blocks and blockSize
	 *        threads per block are used (block-wise copy kernel)
	 *
	 */
	void resize_impl_device(const size_t (& sz)[dim],grid_base_impl<dim,T,S,layout_base,ord_type> & grid_new, unsigned int blockSize = 1)
	{
#if defined(CUDA_GPU) && defined(__NVCC__)

		grid_key_dx<dim> start;
		grid_key_dx<dim> stop;

		for (size_t i = 0 ; i < dim ; i++)
		{
			start.set_d(i,0);

			// take the smallest extent: only the common region can be copied
			if (grid_new.g1.size(i) < sz[i])
			{stop.set_d(i,grid_new.g1.size(i)-1);}
			else
			{stop.set_d(i,sz[i]-1);}
		}

//		if (dim == 1)
//		{
//			copy_fast_1d_device_memory<is_layout_mlin<layout_base<T>>::value,decltype(grid_new.data_),S> cf1dm(data_,grid_new.data_);

//			boost::mpl::for_each_ref<boost::mpl::range_c<int,0,T::max_prop>>(cf1dm);
//		}
		if (dim <= 3)
		{
			auto ite = this->getGPUIterator(start,stop);
			bool has_work = has_work_gpu(ite);

			if (has_work == true)
			{
				if (blockSize == 1)
				{
					CUDA_LAUNCH((copy_ndim_grid_device<dim,decltype(grid_new.toKernel())>),ite,this->toKernel(),grid_new.toKernel());
				}
				else
				{
					// express all work as blocks, then give each block
					// blockSize threads
					move_work_to_blocks(ite);

					ite.thr.x = blockSize;

					CUDA_LAUNCH((copy_ndim_grid_block_device<dim,decltype(grid_new.toKernel())>),ite,this->toKernel(),grid_new.toKernel());
				}
			}
		}
		else
		{
			// dim > 3: iterate the grid as a 1D linear range
			grid_key_dx<1> start;
			start.set_d(0,0);
			grid_key_dx<1> stop({});
			// NOTE(review): stop is set to size(), while every other stop in
			// this file is inclusive (size()-1) — suspected off-by-one,
			// confirm against getGPUIterator_impl semantics
			stop.set_d(0,this->g1.size());

			size_t sz[1];
			sz[0]= this->g1.size();

			grid_sm<1,void> g_sm_copy(sz);

			auto ite = getGPUIterator_impl<1>(g_sm_copy,start,stop);

			CUDA_LAUNCH((copy_ndim_grid_device<dim,decltype(grid_new.toKernel())>),ite,this->toKernel(),grid_new.toKernel());
		}
#else

		std::cout << __FILE__ << ":" << __LINE__ << " error: the function resize require the launch of a kernel, but it seem that this" <<
				" file (grid_base_implementation.hpp) has not been compiled with NVCC " << std::endl;

#endif
	}
526
527 void resize_impl_host(const size_t (& sz)[dim], grid_base_impl<dim,T,S,layout_base,ord_type> & grid_new)
528 {
529 size_t sz_c[dim];
530 for (size_t i = 0 ; i < dim ; i++)
531 {sz_c[i] = (g1.size(i) < sz[i])?g1.size(i):sz[i];}
532
533 grid_sm<dim,void> g1_c(sz_c);
534
535 //! create a source grid iterator
536 grid_key_dx_iterator<dim> it(g1_c);
537
538 while(it.isNext())
539 {
540 // get the grid key
541 grid_key_dx<dim> key = it.get();
542
543 // create a copy element
544
545 grid_new.get_o(key) = this->get_o(key);
546
547 ++it;
548 }
549 }
550
	/*! \brief Allocate (or re-bind) the memory of grid_new during a resize
	 *
	 * When this grid uses an external allocator the same external memory
	 * setup is propagated to grid_new via mem_setext; otherwise grid_new
	 * allocates its own memory.
	 *
	 * \param grid_new grid whose memory has to be set
	 *
	 */
	void resize_impl_memset(grid_base_impl<dim,T,S,layout_base,ord_type> & grid_new)
	{
		//! Set the allocator and allocate the memory
		if (isExternal == true)
		{
			mem_setext<typename std::remove_reference<decltype(grid_new)>::type,S,layout_base<T>,decltype(data_)>::set(grid_new,*this,this->data_);
		}
		else
			grid_new.setMemory();
	}
561
562public:
563
	// Implementation of packer and unpacker for grid
	// (included at class scope: the .ipp file defines member functions)
	#include "grid_pack_unpack.ipp"

	//! it define that this data-structure is a grid
	typedef int yes_i_am_grid;

	//! Definition of the layout
	typedef typename memory_traits_lin<typename T::type>::type memory_lin;

	//! Object container for T; it is the return type of get_o. Through it
	//! you can access all the properties of T
	typedef encapc<dim,T,layout> container;

	//! The object type the grid is storing
	typedef T value_type;
579
	//! Default constructor: zero-sized grid, internal allocator, no memory
	//! allocated yet (setMemory must be called before access)
	grid_base_impl() THROW
	:g1(0),isExternal(false)
	{
		// Add this pointer
	}
586
	/*! \brief create a grid from another grid
	 *
	 * Deep-copies g via the copy-assignment operator.
	 *
	 * \param g the grid to copy
	 *
	 */
	grid_base_impl(const grid_base_impl & g) THROW
	:isExternal(false)
	{
		this->operator=(g);
	}
598
	/*! \brief create a grid of size sz on each direction
	 *
	 * Memory is not allocated here; call setMemory afterwards.
	 *
	 * \param sz size of the grid on each dimensions
	 *
	 */
	grid_base_impl(const size_t & sz) THROW
	:g1(sz),isExternal(false)
	{
		// Add this pointer
	}
609
	/*! \brief Constructor
	 *
	 * It construct a grid of the specified size. Memory is not allocated
	 * here; call setMemory afterwards.
	 *
	 * \param sz array that indicate the size of the grid in each dimension
	 *
	 */
	grid_base_impl(const size_t (& sz)[dim]) THROW
	:g1(sz),isExternal(false)
	{
		// Add this pointer
	}
622
	//! Destructor (memory release is handled by the layout/memory members)
	~grid_base_impl() THROW
	{
		// delete this pointer
	}
628
	/*! \brief It copy a grid
	 *
	 * Deep-copies g (via duplicate) and swaps the copy into this grid.
	 *
	 * \param g grid to copy
	 *
	 * \return itself
	 *
	 */
	grid_base_impl<dim,T,S,layout_base> & operator=(const grid_base_impl<dim,T,S,layout_base> & g)
	{
		swap(g.duplicate());

		return *this;
	}
642
	/*! \brief It copy a grid (move assignment)
	 *
	 * Takes over the content of g by swapping; g is left with this grid's
	 * former content.
	 *
	 * \param g grid to move from
	 *
	 * \return itself
	 *
	 */
	grid_base_impl<dim,T,S,layout_base> & operator=(grid_base_impl<dim,T,S,layout_base> && g)
	{
		swap(g);

		return *this;
	}
656
657 /*! \brief Compare two grids
658 *
659 * \param g grid to check
660 *
661 * \return true if they match
662 *
663 */
664 bool operator==(const grid_base_impl<dim,T,S,layout_base> & g)
665 {
666 // check if the have the same size
667 if (g1 != g.g1)
668 return false;
669
670 auto it = getIterator();
671
672 while (it.isNext())
673 {
674 auto key = it.get();
675
676 if (this->get_o(key) != this->get_o(key))
677 return false;
678
679 ++it;
680 }
681
682 return true;
683 }
684
	/*! \brief create a duplicated version of the grid
	 *
	 * Allocates a new grid of the same size and copies every element into it.
	 *
	 * \return a duplicated version of the grid
	 *
	 */
	grid_base_impl<dim,T,S,layout_base> duplicate() const THROW
	{
		//! Create a completely new grid with sz
		grid_base_impl<dim,T,S,layout_base> grid_new(g1.getSize());

		//! Set the allocator and allocate the memory
		grid_new.setMemory();

		// We know that, if it is 1D we can safely copy the memory
//		if (dim == 1)
//		{
			//! 1-D copy (This case is simple we use raw memory copy because is the fastest option)
//			grid_new.data_.mem->copy(*data_.mem);
//		}
//		else
//		{
		//! N-D copy

		//! create a source grid iterator
		grid_key_dx_iterator<dim> it(g1);

		// element-wise copy of the whole grid
		while(it.isNext())
		{
			grid_new.set(it.get(),*this,it.get());

			++it;
		}
//		}

		// copy grid_new to the base

		return grid_new;
	}
724
#ifdef CUDA_GPU

	/*! \brief Get an iterator (launch configuration) for the GPU
	 *
	 * \param key1 starting point (inclusive)
	 * \param key2 end point (inclusive)
	 * \param n_thr maximum number of threads per block (default 1024)
	 *
	 * \return the GPU iteration information for the sub-domain [key1,key2]
	 *
	 */
	struct ite_gpu<dim> getGPUIterator(grid_key_dx<dim,long int> & key1, grid_key_dx<dim,long int> & key2, size_t n_thr = 1024) const
	{
		return getGPUIterator_impl<dim>(g1,key1,key2,n_thr);
	}
#endif
738
	/*! \brief Return the internal grid information
	 *
	 * Return the internal grid information (sizes and index linearization)
	 *
	 * \return the internal grid
	 *
	 */
	const ord_type & getGrid() const
	{
		return g1;
	}
751
	/*! \brief Create the object that provide memory
	 *
	 * Allocates the internal memory for g1.size() elements and marks the
	 * grid as initialized.
	 *
	 * \tparam S memory type to allocate
	 *
	 */
	void setMemory()
	{
		mem_setm<S,layout_base<T>,decltype(this->data_),decltype(this->g1)>::setMemory(data_,g1,is_mem_init);
	}
764
	/*! \brief Set the object that provide memory from outside
	 *
	 * An external allocator is useful with allocator like PreAllocHeapMem
	 * to have contiguous in memory vectors. Or to force the system to retain
	 * memory
	 *
	 * \tparam p property (of the layout) the memory is bound to
	 *
	 * \param m external memory allocator
	 *
	 */
	template<unsigned int p = 0> void setMemory(S & m)
	{
		//! Is external
		isExternal = true;

		//! Create and set the memory allocator
//		data_.setMemory(m);

		//! Allocate the memory and create the reppresentation
//		if (g1.size() != 0) data_.allocate(g1.size());

		// element construction can be skipped when T reports it holds no pointers
		bool skip_ini = skip_init<has_noPointers<T>::value,T>::skip_();

		mem_setmemory<decltype(data_),S,layout_base<T>>::template setMemory<p>(data_,m,g1.size(),skip_ini);

		is_mem_init = true;
	}
793
	/*! \brief Set the object that provide memory from outside
	 *
	 * An external allocator is useful with allocator like PreAllocHeapMem
	 * to have contiguous in memory vectors. Or to force the system to retain
	 * memory
	 *
	 * \tparam S memory type
	 *
	 * \param m pointer to an array of external memory allocators (one per
	 *          property — see mem_setmemory::setMemoryArray)
	 *
	 */
	void setMemoryArray(S * m)
	{
		//! Is external
		isExternal = true;

		// element construction can be skipped when T reports it holds no pointers
		bool skip_ini = skip_init<has_noPointers<T>::value,T>::skip_();

		mem_setmemory<decltype(data_),S,layout_base<T>>::template setMemoryArray(*this,m,g1.size(),skip_ini);

		is_mem_init = true;
	}
816
	/*! \brief Return a plain pointer to the internal data
	 *
	 * Return a plain pointer to the internal data
	 *
	 * \tparam p property whose buffer pointer is returned
	 *
	 * \return plain data pointer
	 *
	 */
	template<unsigned int p = 0> void * getPointer()
	{
		return mem_getpointer<decltype(data_),layout_base_>::template getPointer<p>(data_);
	}
829
	/*! \brief Return a plain pointer to the internal data (const)
	 *
	 * Return a plain pointer to the internal data
	 *
	 * \tparam p property whose buffer pointer is returned
	 *
	 * \return plain data pointer
	 *
	 */
	template<unsigned int p = 0> const void * getPointer() const
	{
		return mem_getpointer<decltype(data_),layout_base_>::template getPointer<p>(data_);
	}
842
843
	/*! \brief In this case insert is equivalent to get
	 *
	 * Dense grids have every element already present, so inserting simply
	 * returns a reference to the existing element.
	 *
	 * \param v1 grid_key that identify the element in the grid
	 *
	 * \return the reference of the element
	 *
	 */
	template <unsigned int p, typename r_type=decltype(layout_base<T>::template get<p>(data_,g1,grid_key_dx<dim>()))>
	inline r_type insert(const grid_key_dx<dim> & v1)
	{
#ifdef SE_CLASS1
		check_init();
		check_bound(v1);
#endif
		return this->get<p>(v1);
	}
860
861
	/*! \brief Get the reference of the selected element, skipping the bound
	 *         check
	 *
	 * Unlike get, only check_init is performed under SE_CLASS1 (no bound
	 * check). NOTE(review): the name is misspelled ("usafe" vs "unsafe") but
	 * is kept as-is because callers depend on it.
	 *
	 * \param v1 grid_key that identify the element in the grid
	 *
	 * \return the reference of the element
	 *
	 */
	template <unsigned int p, typename r_type=decltype(layout_base<T>::template get<p>(data_,g1,grid_key_dx<dim>()))>
	__device__ __host__ inline r_type get_usafe(const grid_key_dx<dim> & v1)
	{
#ifdef SE_CLASS1
		check_init();
#endif
		return layout_base<T>::template get<p>(data_,g1,v1);
	}
877
	/*! \brief Get the const reference of the selected element, skipping the
	 *         bound check
	 *
	 * Only check_init is performed under SE_CLASS1 (no bound check).
	 *
	 * \param v1 grid_key that identify the element in the grid
	 *
	 * \return the const reference of the element
	 *
	 */
	template <unsigned int p, typename r_type=decltype(layout_base<T>::template get_c<p>(data_,g1,grid_key_dx<dim>()))>
	__device__ __host__ inline r_type get_unsafe(const grid_key_dx<dim> & v1) const
	{
#ifdef SE_CLASS1
		check_init();
#endif
		return layout_base<T>::template get_c<p>(data_,g1,v1);
	}
893
	/*! \brief Get the reference of the selected element
	 *
	 * \tparam p property to access
	 *
	 * \param v1 grid_key that identify the element in the grid
	 *
	 * \return the reference of the element
	 *
	 */
	template <unsigned int p, typename r_type=decltype(layout_base<T>::template get<p>(data_,g1,grid_key_dx<dim>()))>
	__device__ __host__ inline r_type get(const grid_key_dx<dim> & v1)
	{
#ifdef SE_CLASS1
		check_init();
		check_bound(v1);
#endif
		return layout_base<T>::template get<p>(data_,g1,v1);
	}
910
	/*! \brief Get the point flag (in this case just return 0)
	 *
	 * Dense grids carry no per-point flags; the method exists for interface
	 * compatibility and always returns zero.
	 *
	 * \param v1 grid_key that identify the element in the grid (unused)
	 *
	 * \return zero
	 *
	 */
	__device__ __host__ inline unsigned char getFlag(const grid_key_dx<dim> & v1) const
	{
		return 0;
	}
922
	/*! \brief Get the const reference of the selected element
	 *
	 * \tparam p property to access
	 *
	 * \param v1 grid_key that identify the element in the grid
	 *
	 * \return the const reference of the element
	 *
	 */
	template <unsigned int p, typename r_type=decltype(layout_base<T>::template get_c<p>(data_,g1,grid_key_dx<dim>()))>
	__device__ __host__ inline r_type get(const grid_key_dx<dim> & v1) const
	{
#ifdef SE_CLASS1
		check_init();
		check_bound(v1);
#endif
		return layout_base<T>::template get_c<p>(data_,g1,v1);
	}
939
	/*! \brief Get the reference of the selected element
	 *
	 * \tparam p property to access
	 *
	 * \param lin_id linearized element that identify the element in the grid
	 *
	 * \return the reference of the element
	 *
	 */
	template <unsigned int p, typename r_type=decltype(layout_base<T>::template get_lin<p>(data_,g1,0))>
	__device__ __host__ inline r_type get(const size_t lin_id)
	{
#ifdef SE_CLASS1
		check_init();
		check_bound(lin_id);
#endif
		return layout_base<T>::template get_lin<p>(data_,g1,lin_id);
	}
956
	/*! \brief Get the const reference of the selected element
	 *
	 * \tparam p property to access
	 *
	 * \param lin_id linearized element that identify the element in the grid
	 *
	 * \return the const reference of the element
	 *
	 */
	template <unsigned int p, typename r_type=decltype(layout_base<T>::template get_lin<p>(data_,g1,0))>
	__device__ __host__ inline const r_type get(size_t lin_id) const
	{
#ifdef SE_CLASS1
		check_init();
		check_bound(lin_id);
#endif
		// NOTE(review): unlike the non-const overload, get_lin_const is
		// called without the explicit <p> template argument — verify that
		// layout_base::get_lin_const resolves the property correctly here
		return layout_base<T>::template get_lin_const(data_,g1,lin_id);
	}
973
974
	/*! \brief Get the of the selected element as a boost::fusion::vector
	 *
	 * Get the selected element as a boost::fusion::vector
	 *
	 * \param v1 grid_key that identify the element in the grid
	 *
	 * \see encap_c
	 *
	 * \return an encap_c that is the representation of the object (careful is not the object)
	 *
	 */
	inline encapc<dim,T,layout> get_o(const grid_key_dx<dim> & v1)
	{
#ifdef SE_CLASS1
		check_init();
		check_bound(v1);
#endif
		return mem_geto<dim,T,layout_base<T>,decltype(this->data_),decltype(this->g1),decltype(v1)>::get(data_,g1,v1);
	}
994
	/*! \brief Get the of the selected element as a boost::fusion::vector
	 *
	 * Get the selected element as a boost::fusion::vector
	 *
	 * \param v1 grid_key that identify the element in the grid
	 *
	 * \see encap_c
	 *
	 * \return an encap_c that is the representation of the object (careful is not the object)
	 *
	 */
	inline const encapc<dim,T,layout> get_o(const grid_key_dx<dim> & v1) const
	{
#ifdef SE_CLASS1
		check_init();
		check_bound(v1);
#endif
		// const_cast: mem_geto takes a mutable layout reference, constness is
		// restored by returning a const encapc
		return mem_geto<dim,T,layout_base<T>,decltype(this->data_),decltype(this->g1),decltype(v1)>
		       ::get(const_cast<typename std::add_lvalue_reference<decltype(this->data_)>::type>(data_),
			   g1,v1);
	}
1016
1017
1018 /*! \brief Get the of the selected element as a boost::fusion::vector
1019 *
1020 * Get the selected element as a boost::fusion::vector
1021 *
1022 * \param v1 grid_key that identify the element in the grid
1023 *
1024 * \see encap_c
1025 *
1026 * \return an encap_c that is the representation of the object (careful is not the object)
1027 *
1028 */
1029 inline encapc<dim,T,layout> insert_o(const grid_key_dx<dim> & v1)
1030 {
1031#ifdef SE_CLASS1
1032 check_init();
1033 check_bound(v1);
1034#endif
1035 return mem_geto<dim,T,layout_base<T>,decltype(this->data_),decltype(this->g1),decltype(v1)>::get(data_,g1,v1);
1036 }
1037
	/*! \brief Get the of the selected element as a boost::fusion::vector
	 *
	 * Get the selected element as a boost::fusion::vector
	 *
	 * \param v1 linearized id that identify the element in the grid
	 *
	 * \see encap_c
	 *
	 * \return an encap_c that is the representation of the object (careful is not the object)
	 *
	 */
	inline encapc<dim,T,layout> get_o(size_t v1)
	{
#ifdef SE_CLASS1
		check_init();
		check_bound(v1);
#endif
		return mem_geto<dim,T,layout_base<T>,decltype(this->data_),decltype(this->g1),decltype(v1)>::get_lin(data_,v1);
	}
1057
	/*! \brief Get the of the selected element as a boost::fusion::vector
	 *
	 * Get the selected element as a boost::fusion::vector
	 *
	 * \param v1 linearized id that identify the element in the grid
	 *
	 * \see encap_c
	 *
	 * \return an encap_c that is the representation of the object (careful is not the object)
	 *
	 */
	inline const encapc<dim,T,layout> get_o(size_t v1) const
	{
#ifdef SE_CLASS1
		check_init();
		check_bound(v1);
#endif
		// const_cast: mem_geto takes a mutable layout reference, constness is
		// restored by returning a const encapc
		return mem_geto<dim,T,layout_base<T>,decltype(this->data_),decltype(this->g1),decltype(v1)>
		       ::get_lin(const_cast<typename std::add_lvalue_reference<decltype(this->data_)>::type>(data_),v1);
	}
1078
1079 /*! \brief Fill the memory with the selected byte
1080 *
1081 * \warning It is a low level memory operation it ignore any type and semantic safety
1082 *
1083 * \param fl byte pattern to fill
1084 *
1085 */
1086 template<int prp>
1087 void fill(unsigned char fl)
1088 {
1089 if (prp != 0 || is_layout_mlin<layout_base<T>>::type::value == false)
1090 {
1091 std::cout << "Error: " << __FILE__ << ":" << __LINE__ << " unsupported fill operation " << std::endl;
1092 }
1093
1094 memset(getPointer(),fl,size() * sizeof(T));
1095 }
1096
	/*! \brief Remove all the points in this region
	 *
	 * For this case this function does nothing: a dense grid has no notion
	 * of removed points (kept for interface compatibility with sparse grids)
	 *
	 * \param section_to_delete box to kill the points (unused)
	 *
	 */
	void remove(Box<dim,long int> & section_to_delete)
	{}
1106
	/*! \brief Reset the queue to remove and copy section of grids
	 *
	 * \note for this particular implementation it does nothing (kept for
	 *       interface compatibility)
	 *
	 */
	void copyRemoveReset()
	{
	}
1115
	/*! \brief This function check if keep geometry is possible for this grid
	 *
	 * \return false it does not exist this feature on this type of grids
	 *
	 */
	bool isSkipLabellingPossible()
	{
		return false;
	}
1125
1126 /*! \brief copy an external grid into a specific place into this grid
1127 *
1128 * It copy the area indicated by the box_src from grid_src into this grid
1129 * at the place box_dst. The volume of box_src and box_dst
1130 *
1131 * box_dst and box_src are adjusted if box_dst overflow the destination grid
1132 *
1133 * \param grid_src source grid
1134 * \param box_src source box
1135 * \param box_dst destination box
1136 *
1137 */
1138 void copy_to(const grid_base_impl<dim,T,S,layout_base> & grid_src,
1139 const Box<dim,long int> & box_src,
1140 const Box<dim,long int> & box_dst)
1141 {
1142 // fix box_dst
1143
1144 Box<dim,size_t> box_src_;
1145 Box<dim,size_t> box_dst_;
1146
1147 for (size_t i = 0 ; i < dim ; i++)
1148 {
1149 if (box_dst.getHigh(i) >= (long int)g1.size(i))
1150 {
1151 long int shift = box_dst.getHigh(i) - g1.size(i) + 1;
1152 box_dst_.setHigh(i,box_dst.getHigh(i) - shift);
1153 box_src_.setHigh(i,box_src.getHigh(i) - shift);
1154 }
1155 else
1156 {
1157 box_dst_.setHigh(i,box_dst.getHigh(i));
1158 box_src_.setHigh(i,box_src.getHigh(i));
1159 }
1160
1161 if (box_dst.getLow(i) < 0)
1162 {
1163 long int shift = -box_dst.getLow(i);
1164 box_dst_.setLow(i,box_dst.getLow(i) - shift);
1165 box_src_.setLow(i,box_src.getLow(i) - shift);
1166 }
1167 else
1168 {
1169 box_dst_.setLow(i,box_dst.getLow(i));
1170 box_src_.setLow(i,box_src.getLow(i));
1171 }
1172 }
1173
1174 typedef typename to_int_sequence<0,T::max_prop>::type result;
1175
1176 copy_grid_fast_caller<result>::call(*this,grid_src,box_src_,box_dst_);
1177
1178/* copy_grid_fast<!is_contiguos<prp...>::type::value || has_pack_gen<typename device_grid::value_type>::value,
1179 dim,
1180 grid_cp,
1181 grid_info_cp>::copy(grid_src.getGrid(),
1182 this->getGrid(),
1183 box_src,
1184 box_dst,
1185 grid_src,*this,cnt);*/
1186
1187 /////////////////////////////////////////
1188 }
1189
	/*! \brief copy an external grid into a specific place into this grid
	 *
	 * It copy the area indicated by the box_src from grid_src into this grid
	 * at the place box_dst, restricted to the properties prp...
	 * The volume of box_src and box_dst must match
	 *
	 * \tparam prp list of property indexes to copy
	 *
	 * \param grid_src source grid
	 * \param box_src source box
	 * \param box_dst destination box
	 *
	 */
	template<unsigned int ... prp>
	void copy_to_prp(const grid_base_impl<dim,T,S,layout_base> & grid_src,
		         const Box<dim,size_t> & box_src,
		         const Box<dim,size_t> & box_dst)
	{
		typedef typename std::remove_reference<decltype(grid_src)>::type grid_cp;
		typedef typename std::remove_reference<decltype(grid_src.getGrid())>::type grid_info_cp;

		// origin offset for the copy (no additional displacement)
		grid_key_dx<dim> cnt[1];
		cnt[0].zero();

		// the boolean selects the slow element-wise path when the requested
		// properties are not contiguous in memory or T needs pack/unpack
		copy_grid_fast<!is_contiguos<prp...>::type::value || has_pack_gen<T>::value,
					   dim,
					   grid_cp,
					   grid_info_cp>::copy(grid_src.getGrid(),
				   this->getGrid(),
				   box_src,
				   box_dst,
				   grid_src,*this,cnt);
	}
1220
	/*! \brief It does nothing
	 *
	 * \note exists to respect a common grid interface; this dense grid
	 *       has no sparse content to clear
	 *
	 */
	void clear()
	{}
1228
	/*! \brief copy an external grid into a specific place into this grid applying an operation
	 *
	 * It copy the area indicated by the box_src from grid gs into this grid
	 * at the place box_dst, combining source and destination element-wise
	 * with the operation op. The volume of bx_src and bx_dst must match
	 *
	 * \tparam op binary operation applied per property (e.g. replace, add)
	 * \tparam prp list of property indexes to process
	 *
	 * \param gs source grid
	 * \param bx_src source box
	 * \param bx_dst destination box
	 *
	 */
	template<template<typename,typename> class op, unsigned int ... prp>
	void copy_to_op(const grid_base_impl<dim,T,S,layout_base> & gs,
		       const Box<dim,size_t> & bx_src,
		       const Box<dim,size_t> & bx_dst)
	{
		// the two sub-iterators advance in lockstep, pairing each source
		// point with the corresponding destination point
		grid_key_dx_iterator_sub<dim> sub_src(gs.getGrid(),bx_src.getKP1(),bx_src.getKP2());
		grid_key_dx_iterator_sub<dim> sub_dst(this->getGrid(),bx_dst.getKP1(),bx_dst.getKP2());

		while (sub_src.isNext())
		{
			// apply op property-by-property from the source encap to the destination encap
			object_si_di_op<op,decltype(gs.get_o(sub_src.get())),decltype(this->get_o(sub_dst.get())),OBJ_ENCAP,prp...>(gs.get_o(sub_src.get()),this->get_o(sub_dst.get()));

			++sub_src;
			++sub_dst;
		}
	}
1256
	/*! \brief Indicate whether unpacking the header is supported
	 *
	 * \return false, header unpacking is not supported by this grid type
	 *
	 */
	static bool is_unpack_header_supported()
	{return false;}
1264
	/*! \brief Resize the grid
	 *
	 * Resize the grid; the old information is retained on the new grid,
	 * if the new grid is bigger. If it is smaller the data are cropped
	 *
	 * \param sz reference to an array of dimension dim with the new sizes
	 * \param opt options for resize. In case we know that the data are only on device memory we can use DATA_ON_DEVICE,
	 *            in case we know that the data are only on host memory we can use DATA_ON_HOST
	 *            (default: both flags, so both copies are performed)
	 *
	 * \param blockSize The default is equal to 1. In case of accelerator buffer resize indicate the size of the block of
	 *        threads ( this is used in case of a vector of blocks where the block object override to operator=
	 *        to distribute threads on each block element )
	 *
	 */
	void resize(const size_t (& sz)[dim], size_t opt = DATA_ON_HOST | DATA_ON_DEVICE, unsigned int blockSize = 1)
	{
		//! Create a completely new grid with sz

		grid_base_impl<dim,T,S,layout_base,ord_type> grid_new(sz);

		resize_impl_memset(grid_new);


		// We know that, if it is 1D we can safely copy the memory
//		if (dim == 1)
//		{
//			//! 1-D copy (This case is simple we use raw memory copy because is the fastest option)
//			grid_new.data_.mem->copy(*data_.mem);
//		}
//		else
//		{
		// It should be better to separate between fast and slow cases

			//! N-D copy

		// copy the host-side data only when requested
		if (opt & DATA_ON_HOST)
		{resize_impl_host(sz,grid_new);}

		// copy the device-side data only when requested and when device
		// memory is actually distinct from host memory
		if (opt & DATA_ON_DEVICE && S::isDeviceHostSame() == false)
		{resize_impl_device(sz,grid_new,blockSize);}

//		}

		// copy grid_new to the base

		this->swap(grid_new);
	}
1312
1313 /*! \brief Resize the space
1314 *
1315 * Resize the space to a new grid, the element are retained on the new grid,
1316 * if the new grid is bigger the new element are now initialized, if is smaller
1317 * the data are cropped
1318 *
1319 * \param sz reference to an array of dimension dim
1320 * \param opt options for resize. In case we know that the data are only on device memory we can use DATA_ONLY_DEVICE,
1321 * In case we know that the data are only on host memory we can use DATA_ONLY_HOST
1322 *
1323 */
1324 void resize_no_device(const size_t (& sz)[dim])
1325 {
1326 //! Create a completely new grid with sz
1327
1328 grid_base_impl<dim,T,S,layout_base> grid_new(sz);
1329
1330 resize_impl_memset(grid_new);
1331 resize_impl_host(sz,grid_new);
1332
1333 this->swap(grid_new);
1334 }
1335
	/*! \brief Remove one element, valid only on 1D grids
	 *
	 * \param key index of the element to remove
	 *
	 */
	void remove(size_t key)
	{
		// removal by memory move is only meaningful on a 1D layout
		if (dim != 1)
		{
#ifdef SE_CLASS1
			std::cerr << "Error: " << __FILE__ << " " << __LINE__ << " remove work only on dimension == 1 " << "\n";
#endif
			return;
		}

		// It is safe to do a memory copy

		data_.move(&this->template get<0>());
	}
1355
	/*! \brief It swap the objects A become B and B become A, without swapping the memory mode
	 *
	 * This differs from the standard swap and requires a longer explanation.
	 *
	 * By default, after setMemory() is called, this object creates an internal memory
	 * object and uses it to allocate memory internally.
	 *
	 * If instead we use setMemory(external_mem) this object does not create an internal memory object but uses
	 * the passed object to allocate memory. Because the external memory can already have a pool of memory preallocated
	 * we can re-use the memory.
	 *
	 * Because of this, setMemory can be used for memory retaining/re-use and/or garbage collection.
	 * From a different perspective it makes the data structure act like a representation of external
	 * memory. De facto we are giving meaning to the external memory, so we are shaping or re-shaping pre-existing
	 * external memory.
	 *
	 * In the following these two modes are called Mode1 (internal) and Mode2 (external).
	 *
	 * Using the structure this way has consequences, because in Mode2 the memory (and so its life-span) is disentangled
	 * from its structure.
	 *
	 *
	 * The main difference comes when we swap objects where one or both are in Mode2
	 *
	 * Let's suppose object A is in Mode1 and object B is in Mode2. The normal swap fully swaps the objects:
	 *
	 * A.swap(B) A become B (in mode 2) and B become A (in mode 1)
	 *
	 * A.swap_nomode(B) In this case the mode is not swapped: A become B (in mode 1) and B become A (in mode 2).
	 *                  So each object keeps its original mode
	 *
	 * \param grid object B
	 *
	 */

	void swap_nomode(grid_base_impl<dim,T,S,layout_base> & grid)
	{
		// swap the data layouts, preserving each side's memory mode
		mem_swap<T,layout_base<T>,decltype(data_),decltype(grid)>::template swap_nomode<S>(data_,grid.data_);

		// exchange the grid info
		g1.swap(grid.g1);

		// exchange the init status
		bool exg = is_mem_init;
		is_mem_init = grid.is_mem_init;
		grid.is_mem_init = exg;
	}
1403
1404 /*! \brief It swap the objects A become B and B become A using A.swap(B);
1405 *
1406 * \param grid to swap with
1407 *
1408 */
1409
1410 void swap(grid_base_impl<dim,T,S,layout_base,ord_type> & grid)
1411 {
1412 mem_swap<T,layout_base<T>,decltype(data_),decltype(grid)>::swap(data_,grid.data_);
1413
1414 // exchange the grid info
1415 g1.swap(grid.g1);
1416
1417 // exchange the init status
1418 bool exg = is_mem_init;
1419 is_mem_init = grid.is_mem_init;
1420 grid.is_mem_init = exg;
1421
1422 // exchange the is external status
1423 exg = isExternal;
1424 isExternal = grid.isExternal;
1425 grid.isExternal = exg;
1426 }
1427
	/*! \brief It move the allocated object from one grid to another
	 *
	 * It move the allocated object from one grid to another; after this
	 * call the argument grid is no longer valid
	 *
	 * \param grid to move/copy
	 *
	 */

	void swap(grid_base_impl<dim,T,S,layout_base> && grid)
	{
		// a move-swap is implemented as a plain swap with the temporary
		swap(grid);
	}
1441
	/*! \brief set only some properties
	 *
	 * Copy only the properties prp... of the element key2 of grid g into
	 * the element key1 of this grid
	 *
	 * \tparam prp list of property indexes to copy
	 *
	 * \param key1 destination point
	 * \param g source grid
	 * \param key2 source point
	 */
	template<unsigned int ... prp>
	__device__ __host__ inline void set(const grid_key_dx<dim> & key1,const grid_base_impl & g, const grid_key_dx<dim> & key2)
	{
		auto edest = this->get_o(key1);
		auto esrc = g.get_o(key2);

		// functor that copies one listed property per invocation
		copy_cpu_encap_encap_prp<decltype(g.get_o(key2)),decltype(this->get_o(key1)),prp...> ec(esrc,edest);

		// invoke the functor once for each property in prp...
		boost::mpl::for_each_ref<boost::mpl::range_c<int,0,sizeof...(prp)>>(ec);
	}
1458
	/*! \brief set an element of the grid
	 *
	 * set an element of the grid from an encapsulated object
	 *
	 * \param dx is the grid key or the position to set
	 * \param obj encapsulated value to set
	 *
	 */
	template<typename Memory> inline void set(grid_key_dx<dim> dx, const encapc<1,T,Memory> & obj)
	{
#ifdef SE_CLASS1
		check_init();
		check_bound(dx);
#endif

		// create the object to copy the properties
		copy_cpu_encap<dim,grid_base_impl<dim,T,S,layout_base>,layout> cp(dx,*this,obj);

		// copy each property
		boost::mpl::for_each_ref< boost::mpl::range_c<int,0,T::max_prop> >(cp);
	}
1480
	/*! \brief set an element of the grid
	 *
	 * set an element of the grid from a plain object of type T
	 *
	 * \param dx is the grid key or the position to set
	 * \param obj value to set
	 *
	 */

	inline void set(grid_key_dx<dim> dx, const T & obj)
	{
#ifdef SE_CLASS1
		check_init();
		check_bound(dx);
#endif

		// assignment goes through the encapsulated-object view of the element
		this->get_o(dx) = obj;
	}
1499
1500
	/*! \brief Set an element of the grid from another element of another grid
	 *
	 * \param key1 element of the grid to set
	 * \param g source grid (same memory/layout type as this grid)
	 * \param key2 element of the source grid to copy
	 *
	 */

	inline void set(const grid_key_dx<dim> & key1,
					const grid_base_impl<dim,T,S,layout_base> & g,
					const grid_key_dx<dim> & key2)
	{
#ifdef SE_CLASS1
		check_init();
		check_bound(key1);
		check_bound(g,key2);
#endif

		this->get_o(key1) = g.get_o(key2);
	}
1521
	/*! \brief Set an element of the grid from another element of another grid
	 *
	 * Variant addressing elements by linearized index instead of grid key
	 *
	 * \param key1 linearized index of the element of this grid to set
	 * \param g source grid
	 * \param key2 linearized index of the element of the source grid to copy
	 *
	 */

	inline void set(const size_t key1,
					const grid_base_impl<dim,T,S,layout_base> & g,
					const size_t key2)
	{
#ifdef SE_CLASS1
		check_init();
		check_bound(key1);
		check_bound(g,key2);
#endif

		this->get_o(key1) = g.get_o(key2);
	}
1542
	/*! \brief Set an element of the grid from another element of another grid
	 *
	 * Variant accepting a source grid with a different memory type Mem
	 *
	 * \param key1 element of the grid to set
	 * \param g source grid
	 * \param key2 element of the source grid to copy
	 *
	 */

	template<typename Mem> inline void set(const grid_key_dx<dim> & key1,const grid_base_impl<dim,T,Mem,layout_base> & g, const grid_key_dx<dim> & key2)
	{
#ifdef SE_CLASS1
		check_init();
		check_bound(key1);
		check_bound(g,key2);
#endif

		this->get_o(key1) = g.get_o(key2);
	}
1561
	/*! \brief Set an element of the grid from another element of another grid
	 *
	 * Most general variant: the source grid may differ in both memory type
	 * (Mem) and layout (layout_base2)
	 *
	 * \param key1 element of the grid to set
	 * \param g source grid
	 * \param key2 element of the source grid to copy
	 *
	 */

	template<typename Mem, template <typename> class layout_base2> inline void set_general(const grid_key_dx<dim> & key1,
																						   const grid_base_impl<dim,T,Mem,layout_base2> & g,
																						   const grid_key_dx<dim> & key2)
	{
#ifdef SE_CLASS1
		check_init();
		check_bound(key1);
		check_bound(g,key2);
#endif

		this->get_o(key1) = g.get_o(key2);
	}
1582
	/*! \brief return the size of the grid
	 *
	 * \return the total number of grid points
	 *
	 */
	inline size_t size() const
	{
		return g1.size();
	}
1592
	/*! \brief Return a sub-grid iterator
	 *
	 * Return a sub-grid iterator to iterate through the points between
	 * start and stop (inclusive)
	 *
	 * \param start start point
	 * \param stop stop point
	 *
	 * \return a sub-grid iterator
	 *
	 */
	inline grid_key_dx_iterator_sub<dim> getSubIterator(const grid_key_dx<dim> & start, const grid_key_dx<dim> & stop) const
	{
		return g1.getSubIterator(start,stop);
	}
1607
	/*! \brief Return a sub-grid iterator
	 *
	 * Return a sub-grid iterator that excludes a margin of m points on
	 * every side of the grid
	 *
	 * \param m Margin
	 *
	 * \return a sub-grid iterator
	 *
	 */
	inline grid_key_dx_iterator_sub<dim> getSubIterator(size_t m)
	{
		return grid_key_dx_iterator_sub<dim>(g1,m);
	}
1621
1622 /*! \brief Return a grid iterator
1623 *
1624 * Return a grid iterator, to iterate through the grid
1625 *
1626 * \return a grid iterator
1627 *
1628 */
1629 inline grid_key_dx_iterator<dim> getIterator() const
1630 {
1631 size_t sz[dim];
1632
1633 for (int i = 0 ; i < dim ; i++)
1634 {sz[i] = g1.size(i);}
1635
1636 grid_sm<dim,void> gvoid(sz);
1637
1638 return grid_key_dx_iterator<dim>(gvoid);
1639 }
1640
1641#ifdef CUDA_GPU
1642
	/*! \brief Convert the grid into a data-structure compatible for computing into GPU
	 *
	 * The object created can be considered like a reference of the original:
	 * it does not own the memory
	 *
	 * \return a kernel-side view of this grid
	 *
	 */
	grid_gpu_ker<dim,T,layout_base> toKernel()
	{
		return grid_toKernelImpl<is_layout_inte<layout_base<T>>::value,dim,T>::toKernel(*this);
	}
1652
	/*! \brief Convert the grid into a data-structure compatible for computing into GPU (const version)
	 *
	 * The object created can be considered like a reference of the original:
	 * it does not own the memory
	 *
	 * \return a const kernel-side view of this grid
	 *
	 */
	const grid_gpu_ker<dim,T,layout_base> toKernel() const
	{
		return grid_toKernelImpl<is_layout_inte<layout_base<T>>::value,dim,T>::toKernel(*this);
	}
1662
1663#endif
1664
	/*! \brief Return a grid iterator
	 *
	 * Return a grid iterator that additionally maintains the linearized
	 * offsets of the given stencil points while iterating
	 *
	 * \tparam Np number of stencil points
	 *
	 * \param stencil_pnt the stencil offsets to track
	 *
	 * \return a grid iterator with stencil calculation
	 *
	 */
	template<unsigned int Np>
	inline grid_key_dx_iterator<dim,stencil_offset_compute<dim,Np>>
	getIteratorStencil(const grid_key_dx<dim> (& stencil_pnt)[Np]) const
	{
		return grid_key_dx_iterator<dim,stencil_offset_compute<dim,Np>>(g1,stencil_pnt);
	}
1678
1679 /*! \brief Return a grid iterator over all points included between start and stop point
1680 *
1681 * Return a grid iterator over all the point with the exception of the
1682 * ghost part
1683 *
1684 * \param start point
1685 * \param stop point
1686 * \param to_init unused bool
1687 *
1688 * \return a sub-grid iterator
1689 *
1690 */
1691 inline grid_key_dx_iterator_sub<dim> getIterator(const grid_key_dx<dim> & start, const grid_key_dx<dim> & stop, bool to_init = false) const
1692 {
1693 size_t sz[dim];
1694
1695 for (int i = 0 ; i < dim ; i++)
1696 {sz[i] = g1.size(i);}
1697
1698 grid_sm<dim,void> gvoid(sz);
1699
1700 // get the starting point and the end point of the real domain
1701 return grid_key_dx_iterator_sub<dim>(gvoid,start,stop);
1702 }
1703
	/*! \brief return the internal data_
	 *
	 * \return a reference to the internal layout object holding the grid data
	 *
	 */
	layout & get_internal_data_()
	{
		return data_;
	}
1713
	/*! \brief return the internal data_ (const version)
	 *
	 * \return a const reference to the internal layout object holding the grid data
	 *
	 */
	const layout & get_internal_data_() const
	{
		return data_;
	}
1723
	/*! \brief In this case it does nothing
	 *
	 * \note this function exist to respect the interface to work as distributed
	 *
	 */
	void removeAddUnpackReset()
	{}
1731
	/*! \brief In this case it does nothing
	 *
	 * \note this function exist to respect the interface to work as distributed
	 *
	 * \param ctx context (unused)
	 * \param opt options (unused)
	 *
	 */
	template<unsigned int ... prp, typename context_type>
	void removeAddUnpackFinalize(const context_type & ctx, int opt)
	{}
1742
	/*! \brief In this case it does nothing
	 *
	 * \note this function exist to respect the interface to work as distributed
	 *
	 * \param ctx context (unused)
	 * \param opt options (unused)
	 *
	 */
	template<unsigned int ... prp, typename context_type>
	void removeCopyToFinalize(const context_type & ctx, int opt)
	{}
1753
	/*! \brief It does nothing
	 *
	 * \note exists to respect a common grid interface; this dense grid has no flush state
	 *
	 */
	void resetFlush()
	{}
1760};
1761
1762
1763#endif /* OPENFPM_DATA_SRC_GRID_GRID_BASE_IMPLEMENTATION_HPP_ */
1764