1/*
2 * SparseGrid.hpp
3 *
4 * Created on: Oct 22, 2017
5 * Author: i-bird
6 */
7
8#ifndef OPENFPM_DATA_SRC_SPARSEGRID_SPARSEGRID_HPP_
9#define OPENFPM_DATA_SRC_SPARSEGRID_SPARSEGRID_HPP_
10
11#include "memory_ly/memory_array.hpp"
12#include "memory_ly/memory_c.hpp"
13#include "memory_ly/memory_conf.hpp"
14#include "hash_map/hopscotch_map.h"
15#include "hash_map/hopscotch_set.h"
16#include "Vector/map_vector.hpp"
17#include "util/variadic_to_vmpl.hpp"
18#include "data_type/aggregate.hpp"
19#include "SparseGridUtil.hpp"
20#include "SparseGrid_iterator.hpp"
21#include "SparseGrid_iterator_block.hpp"
22#include "SparseGrid_conv_opt.hpp"
23//#include "util/debug.hpp"
24// We do not want parallel writer
25
26#ifdef OPENFPM_DATA_ENABLE_IO_MODULE
27#define NO_PARALLEL
28#include "VTKWriter/VTKWriter.hpp"
29#endif
30
31template<typename chunk_def>
32struct sparse_grid_bck_value
33{
34 chunk_def bck;
35
36 sparse_grid_bck_value(chunk_def bck)
37 :bck(bck)
38 {}
39
40 template<unsigned int p>
41 auto get() -> decltype(bck.template get<p>()[0])
42 {
43 return bck.template get<p>()[0];
44 }
45
46 template<unsigned int p>
47 auto get() const -> decltype(bck.template get<p>()[0])
48 {
49 return bck.template get<p>()[0];
50 }
51};
52
53template<typename ArrTypeView>
54struct std_array_vector_view
55{
56 int pos;
57 ArrTypeView arr;
58
59 std_array_vector_view(int pos,ArrTypeView arr)
60 :pos(pos),arr(arr)
61 {}
62
63 decltype(arr[0][0]) operator[](int comp)
64 {
65 return arr[comp][pos];
66 }
67
68 decltype(std::declval<const ArrTypeView>()[0][0]) operator[](int comp) const
69 {
70 return arr[comp][pos];
71 }
72};
73
74
75
76template<typename T>
77struct get_selector
78{
79 template<unsigned int p, typename chunks_vector_type>
80 static T & get(chunks_vector_type & chunks, size_t active_cnk, int ele_id)
81 {
82 return chunks.template get<p>(active_cnk)[ele_id];
83 }
84
85 template<unsigned int p, typename chunks_vector_type>
86 static const T & get_const(chunks_vector_type & chunks, size_t active_cnk, int ele_id)
87 {
88 return chunks.template get<p>(active_cnk)[ele_id];
89 }
90};
91
92template<typename T, unsigned int N1>
93struct get_selector<T[N1]>
94{
95 template<unsigned int p, typename chunks_vector_type>
96 static std_array_vector_view<decltype(std::declval<chunks_vector_type>().template get<p>(0))> get(chunks_vector_type & chunks, size_t active_cnk, int ele_id)
97 {
98 return std_array_vector_view<decltype(chunks.template get<p>(active_cnk))>(ele_id,chunks.template get<p>(active_cnk));
99 }
100
101 template<unsigned int p, typename chunks_vector_type>
102 static const std_array_vector_view<decltype(std::declval<chunks_vector_type>().template get<p>(0))> get_const(chunks_vector_type & chunks, size_t active_cnk, int ele_id)
103 {
104 return std_array_vector_view<decltype(chunks.template get<p>(active_cnk))>(ele_id,chunks.template get<p>(active_cnk));
105 }
106};
107
108
109template<typename Tsrc,typename Tdst>
110class copy_bck
111{
112 //! source
113 Tsrc & src;
114
115 //! destination
116 Tdst & dst;
117
118 size_t pos;
119
120public:
121
122 copy_bck(Tsrc & src, Tdst & dst,size_t pos)
123 :src(src),dst(dst),pos(pos)
124 {}
125
	//! It calls the copy function for each property
127 template<typename T>
128 inline void operator()(T& t) const
129 {
130 typedef typename std::remove_reference<decltype(src.template get<T::value>())>::type copy_rtype;
131
132 std_array_copy_chunks<T::value,copy_rtype>::copy(src,dst,pos);
133 }
134
135};
136
137template<typename Tsrc,typename Tdst>
138class copy_prop_to_vector
139{
140 //! source
141 Tsrc src;
142
143 //! destination
144 Tdst dst;
145
146 size_t pos;
147
148public:
149
150 copy_prop_to_vector(Tsrc src, Tdst dst,size_t pos)
151 :src(src),dst(dst),pos(pos)
152 {}
153
	//! It calls the copy function for each property
155 template<typename T>
156 inline void operator()(T& t) const
157 {
158 typedef typename std::remove_reference<decltype(dst.template get<T::value>())>::type copy_rtype;
159
160 meta_copy<copy_rtype>::meta_copy_(src.template get<T::value>()[pos],dst.template get<T::value>());
161 }
162
163};
164
165template<unsigned int dim, typename Tsrc,typename Tdst>
166class copy_sparse_to_sparse
167{
168 //! source
169 const Tsrc & src;
170
171 //! destination
172 Tdst & dst;
173
174 //! source position
175 grid_key_dx<dim> & pos_src;
176
177 //! destination position
178 grid_key_dx<dim> & pos_dst;
179
180public:
181
182 copy_sparse_to_sparse(const Tsrc & src, Tdst & dst,grid_key_dx<dim> & pos_src, grid_key_dx<dim> & pos_dst)
183 :src(src),dst(dst),pos_src(pos_src),pos_dst(pos_dst)
184 {}
185
	//! It calls the copy function for each property
187 template<typename T>
188 inline void operator()(T& t) const
189 {
190 typedef typename std::remove_reference<decltype(dst.template insert<T::value>(pos_dst))>::type copy_rtype;
191
192 meta_copy<copy_rtype>::meta_copy_(src.template get<T::value>(pos_src),dst.template insert<T::value>(pos_dst));
193 }
194
195};
196
197template<typename T>
198struct copy_sparse_to_sparse_bb_impl
199{
200 template<unsigned int prop, typename Tsrc, typename Tdst>
201 static void copy(const Tsrc & src, Tdst & dst,short int pos_id_src, short int pos_id_dst)
202 {
203 typedef typename std::remove_reference<decltype(dst.template get<prop>()[pos_id_dst])>::type copy_rtype;
204
205 meta_copy<copy_rtype>::meta_copy_(src.template get<prop>()[pos_id_src],dst.template get<prop>()[pos_id_dst]);
206 }
207};
208
209template<typename T, unsigned int N1>
210struct copy_sparse_to_sparse_bb_impl<T[N1]>
211{
212 template<unsigned int prop, typename Tsrc, typename Tdst>
213 static void copy(const Tsrc & src, Tdst & dst,short int pos_id_src, short int pos_id_dst)
214 {
215 typedef typename std::remove_reference<decltype(dst.template get<prop>()[0][pos_id_dst])>::type copy_rtype;
216
217 for (int i = 0 ; i < N1 ; i++)
218 {
219 meta_copy<copy_rtype>::meta_copy_(src.template get<prop>()[i][pos_id_src],dst.template get<prop>()[i][pos_id_dst]);
220 }
221 }
222};
223
224template<typename T, unsigned int N1, unsigned int N2>
225struct copy_sparse_to_sparse_bb_impl<T[N1][N2]>
226{
227 template<unsigned int prop, typename Tsrc, typename Tdst>
228 static void copy(const Tsrc & src, Tdst & dst,short int pos_id_src, short int pos_id_dst)
229 {
230 typedef typename std::remove_reference<decltype(dst.template get<prop>()[0][0][pos_id_dst])>::type copy_rtype;
231
232 for (int i = 0 ; i < N1 ; i++)
233 {
234 for (int j = 0 ; j < N2 ; j++)
235 {
236 meta_copy<copy_rtype>::meta_copy_(src.template get<prop>()[i][j][pos_id_src],dst.template get<prop>()[i][j][pos_id_dst]);
237 }
238 }
239 }
240};
241
242template<unsigned int dim, typename Tsrc,typename Tdst, typename aggrType>
243class copy_sparse_to_sparse_bb
244{
245 //! source
246 const Tsrc & src;
247
248 //! destination
249 Tdst & dst;
250
251 //! source position
252 short int pos_id_src;
253
254 //! destination position
255 short int pos_id_dst;
256
257public:
258
259 copy_sparse_to_sparse_bb(const Tsrc & src, Tdst & dst,short int pos_id_src, short int pos_id_dst)
260 :src(src),dst(dst),pos_id_src(pos_id_src),pos_id_dst(pos_id_dst)
261 {}
262
	//! It calls the copy function for each property
264 template<typename T>
265 inline void operator()(T& t) const
266 {
267/* typedef typename std::remove_reference<decltype(dst.template get<T::value>())>::type copy_rtype;
268
269 meta_copy<copy_rtype>::meta_copy_(src.template get<T::value>()[pos_id_src],dst.template get<T::value>()[pos_id_dst]);*/
270
271 typedef typename boost::mpl::at<typename aggrType::type, T>::type copy_rtype;
272
273 copy_sparse_to_sparse_bb_impl<copy_rtype>::template copy<T::value>(src,dst,pos_id_src,pos_id_dst);
274 }
275
276};
277
278template< template<typename,typename> class op,unsigned int dim, typename Tsrc,typename Tdst, unsigned int ... prp>
279class copy_sparse_to_sparse_op
280{
281 //! source
282 const Tsrc & src;
283
284 //! destination
285 Tdst & dst;
286
287 //! source position
288 grid_key_dx<dim> & pos_src;
289
290 //! destination position
291 grid_key_dx<dim> & pos_dst;
292
293 //! Convert the packed properties into an MPL vector
294 typedef typename to_boost_vmpl<prp...>::type v_prp;
295
296public:
297
298 copy_sparse_to_sparse_op(const Tsrc & src, Tdst & dst,grid_key_dx<dim> & pos_src, grid_key_dx<dim> & pos_dst)
299 :src(src),dst(dst),pos_src(pos_src),pos_dst(pos_dst)
300 {}
301
	//! It calls the copy function for each property
303 template<typename T>
304 inline void operator()(T& t) const
305 {
306 typedef typename boost::mpl::at<v_prp,boost::mpl::int_<T::value>>::type idx_type;
307 typedef typename std::remove_reference<decltype(dst.template insert<idx_type::value>(pos_dst))>::type copy_rtype;
308
309 if (dst.existPoint(pos_dst) == false)
310 {meta_copy_op<replace_,copy_rtype>::meta_copy_op_(src.template get<idx_type::value>(pos_src),dst.template insert<idx_type::value>(pos_dst));}
311 else
312 {meta_copy_op<op,copy_rtype>::meta_copy_op_(src.template get<idx_type::value>(pos_src),dst.template insert<idx_type::value>(pos_dst));}
313 }
314
315};
316
317
318
/*! \brief this class is a functor for the "for_each" algorithm
 *
 * This class is a functor for the "for_each" algorithm. For each
 * element of the boost::mpl::vector the operator() is called.
 * It is mainly used to copy a boost::mpl::vector into a runtime array.
 *
 */
326
327template<unsigned int dim, typename mpl_v>
328struct copy_sz
329{
	//! runtime size array
331 size_t (& sz)[dim];
332
333
334 /*! \brief constructor
335 *
	 * \param sz runtime size array to fill
337 *
338 */
339 inline copy_sz(size_t (& sz)[dim])
340 :sz(sz)
341 {
342 };
343
	//! It calls the copy function for each property
345 template<typename T>
346 inline void operator()(T& t) const
347 {
348 sz[T::value] = boost::mpl::at<mpl_v,boost::mpl::int_<T::value>>::type::value;
349 }
350};
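
/* A minimal usage sketch for copy_sz, mirroring what sgrid_cpu::init() does further down:
 * it copies the compile-time chunk sizes stored in the chunking mpl vector into a runtime
 * size_t array. The names dim and chunking are assumed available as they are in sgrid_cpu.
 *
 * \code
 * size_t sz_cnk[dim];
 *
 * copy_sz<dim,typename chunking::type> cpsz(sz_cnk);
 * boost::mpl::for_each_ref< boost::mpl::range_c<int,0,dim> >(cpsz);
 * \endcode
 */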
351
352
353template<unsigned int N>
354struct load_mask_impl
355{
356 template<typename Vc_type>
	static inline void load(Vc_type & Vc, unsigned char * mask_sum)
358 {
359 std::cout << __FILE__ << ":" << __LINE__ << " unknown size " << std::endl;
360 }
361};
362
363template<>
364struct load_mask_impl<1>
365{
366 template<typename Vc_type>
367 static inline void load(Vc_type & Vc, unsigned char * mask_sum)
368 {
369 Vc[0] = mask_sum[0];
370 }
371};
372
373template<>
374struct load_mask_impl<2>
375{
376 template<typename Vc_type>
377 static inline void load(Vc_type & Vc, unsigned char * mask_sum)
378 {
379 Vc[0] = mask_sum[0];
380 Vc[1] = mask_sum[1];
381 }
382};
383
384template<>
385struct load_mask_impl<4>
386{
387 template<typename Vc_type>
388 static inline void load(Vc_type & Vc, unsigned char * mask_sum)
389 {
390 Vc[0] = mask_sum[0];
391 Vc[1] = mask_sum[1];
392 Vc[2] = mask_sum[2];
393 Vc[3] = mask_sum[3];
394 }
395};
396
397template<>
398struct load_mask_impl<8>
399{
400 template<typename Vc_type>
401 static inline void load(Vc_type & Vc, unsigned char * mask_sum)
402 {
403 Vc[0] = mask_sum[0];
404 Vc[1] = mask_sum[1];
405 Vc[2] = mask_sum[2];
406 Vc[3] = mask_sum[3];
407 Vc[4] = mask_sum[4];
408 Vc[5] = mask_sum[5];
409 Vc[6] = mask_sum[6];
410 Vc[7] = mask_sum[7];
411 }
412};
413
414template<>
415struct load_mask_impl<16>
416{
417 template<typename Vc_type>
418 static inline void load(Vc_type & Vc, unsigned char * mask_sum)
419 {
420 Vc[0] = mask_sum[0];
421 Vc[1] = mask_sum[1];
422 Vc[2] = mask_sum[2];
423 Vc[3] = mask_sum[3];
424 Vc[4] = mask_sum[4];
425 Vc[5] = mask_sum[5];
426 Vc[6] = mask_sum[6];
427 Vc[7] = mask_sum[7];
428 Vc[8] = mask_sum[8];
429 Vc[9] = mask_sum[9];
430 Vc[10] = mask_sum[10];
431 Vc[11] = mask_sum[11];
432 Vc[12] = mask_sum[12];
433 Vc[13] = mask_sum[13];
434 Vc[14] = mask_sum[14];
435 Vc[15] = mask_sum[15];
436 }
437};
438
439template<typename Vc_type>
440inline Vc_type load_mask(unsigned char * mask_sum)
441{
442 Vc_type v;
443
444 load_mask_impl<Vc_type::Size>::load(v,mask_sum);
445
446 return v;
447}
448
449template<unsigned int dim,
450 typename T,
451 typename S,
452 typename grid_lin,
453 typename layout,
454 template<typename> class layout_base,
455 typename chunking>
456class sgrid_cpu
457{
458 //! cache pointer
459 mutable size_t cache_pnt;
460
461 //! cache
462 mutable long int cache[SGRID_CACHE];
463
464 //! cached id
465 mutable long int cached_id[SGRID_CACHE];
466
467 //! Map to convert from grid coordinates to chunk
468 tsl::hopscotch_map<size_t, size_t> map;
469
	//! indicates which elements in the chunk are really filled
471 openfpm::vector<cheader<dim>,S> header_inf;
472
	//! mask indicating which elements of each chunk are filled
	openfpm::vector<mheader<chunking::size::value>,S> header_mask;
474
475 //Definition of the chunks
476 typedef typename v_transform_two_v2<Ft_chunk,boost::mpl::int_<chunking::size::value>,typename T::type>::type chunk_def;
477
478 //! background values
479 //aggregate_bfv<chunk_def> background;
480
481 typedef sgrid_cpu<dim,T,S,grid_lin,layout,layout_base,chunking> self;
482
483 //! vector of chunks
484 openfpm::vector<aggregate_bfv<chunk_def>,S,layout_base > chunks;
485
486 //! grid size information
487 grid_lin g_sm;
488
489 //! grid size information with shift
490 grid_lin g_sm_shift;
491
492 //! conversion position in the chunks
493 grid_key_dx<dim> pos_chunk[chunking::size::value];
494
495 //! size of the chunk
496 size_t sz_cnk[dim];
497
	//! list of chunks that became empty, queued for removal
	openfpm::vector<size_t> empty_v;
499
500 //! bool that indicate if the NNlist is filled
501 bool findNN;
502
503 //! for each chunk store the neighborhood chunks
504 openfpm::vector<int> NNlist;
505
	/*! \brief Given a key return the chunk that contains that key; in case the chunk does not exist return the
	 * background chunk
	 *
	 * \param v1 point to search
	 * \param active_cnk returned active chunk
	 * \param sub_id returned index inside the chunk
512 *
513 */
514 inline void find_active_chunk_from_point(const grid_key_dx<dim> & v1,size_t & active_cnk, short int & sub_id)
515 {
516 grid_key_dx<dim> kh = v1;
517 grid_key_dx<dim> kl;
518
519 // shift the key
520 key_shift<dim,chunking>::shift(kh,kl);
521
522 find_active_chunk(kh,active_cnk);
523
524 sub_id = sublin<dim,typename chunking::shift_c>::lin(kl);
525 }
526
	/*! \brief Remove an element from a chunk, updating the element counter
	 *
	 * \param sub_id element to remove
	 * \param nele number of filled elements in the chunk (updated)
	 * \param mask chunk mask (updated)
530 */
531 template<unsigned int n_ele>
532 inline void remove_from_chunk(size_t sub_id,
533 int & nele,
534 unsigned char (& mask)[n_ele])
535 {
536 nele = (mask[sub_id])?nele-1:nele;
537
538 mask[sub_id] = 0;
539 }
540
541 /*! \brief reconstruct the map
542 *
543 *
544 *
545 */
546 inline void reconstruct_map()
547 {
548 // reconstruct map
549
550 map.clear();
551 for (size_t i = 1 ; i < header_inf.size() ; i++)
552 {
553 grid_key_dx<dim> kh = header_inf.get(i).pos;
554 grid_key_dx<dim> kl;
555
556 // shift the key
557 key_shift<dim,chunking>::shift(kh,kl);
558
559 long int lin_id = g_sm_shift.LinId(kh);
560
561 map[lin_id] = i;
562 }
563 }
564
565 /*! \brief Eliminate empty chunks
566 *
	 * \warning Because this operation is time consuming, it is performed only once the list
	 * of empty chunks reaches a critical size
569 *
570 */
571 inline void remove_empty()
572 {
573 if (empty_v.size() >= FLUSH_REMOVE)
574 {
575 // eliminate double entry
576
577 empty_v.sort();
578 empty_v.unique();
579
580 // Because chunks can be refilled the empty list can contain chunks that are
581 // filled so before remove we have to check that they are really empty
582
583 for (int i = empty_v.size() - 1 ; i >= 0 ; i--)
584 {
585 if (header_inf.get(empty_v.get(i)).nele != 0)
586 {empty_v.remove(i);}
587 }
588
589 header_inf.remove(empty_v);
590 header_mask.remove(empty_v);
591 chunks.remove(empty_v);
592
593 // reconstruct map
594
595 reconstruct_map();
596
597 empty_v.clear();
598
599 // cache must be cleared
600
601 clear_cache();
602 }
603 }
604
605 /*! \brief add on cache
606 *
607 * \param lin_id linearized id
608 * \param active_cnk active chunk
609 *
610 */
611 inline void add_on_cache(size_t lin_id, size_t active_cnk) const
612 {
613 // Add on cache the chunk
614 cache[cache_pnt] = lin_id;
615 cached_id[cache_pnt] = active_cnk;
616 cache_pnt++;
617 cache_pnt = (cache_pnt >= SGRID_CACHE)?0:cache_pnt;
618 }
619
620 /*! \brief reset the cache
621 *
622 *
623 */
624 inline void clear_cache()
625 {
626 cache_pnt = 0;
627 for (size_t i = 0 ; i < SGRID_CACHE ; i++)
628 {cache[i] = -1;}
629 }
630
631 /*! \brief set the grid shift from size
632 *
633 * \param sz size of the grid
634 * \param g_sm_shift chunks grid size
635 *
636 */
637 void set_g_shift_from_size(const size_t (& sz)[dim], grid_lin & g_sm_shift)
638 {
639 grid_key_dx<dim> cs;
640 grid_key_dx<dim> unused;
641
642 for (size_t i = 0 ; i < dim ; i++)
643 {cs.set_d(i,sz[i]);}
644
645 key_shift<dim,chunking>::shift(cs,unused);
646
647 size_t sz_i[dim];
648
649 for (size_t i = 0 ; i < dim ; i++)
650 {sz_i[i] = cs.get(i) + 1;}
651
652 g_sm_shift.setDimensions(sz_i);
653 }
654
655 /*! \brief initialize
656 *
657 *
658 */
659 void init()
660 {
661 findNN = false;
662
663 for (size_t i = 0 ; i < SGRID_CACHE ; i++)
664 {cache[i] = -1;}
665
		// fill sz_cnk and pos_chunk
667
668 copy_sz<dim,typename chunking::type> cpsz(sz_cnk);
669 boost::mpl::for_each_ref< boost::mpl::range_c<int,0,dim> >(cpsz);
670
671 grid_sm<dim,void> gs(sz_cnk);
672
673 grid_key_dx_iterator<dim,no_stencil,grid_sm<dim,void>> it(gs);
674 size_t cnt = 0;
675
676 while (it.isNext())
677 {
678 auto key = it.get();
679
680 for (size_t i = 0 ; i < dim ; i++)
681 {
682 pos_chunk[cnt].set_d(i,key.get(i));
683 }
684
685 ++cnt;
686 ++it;
687 }
688
		// Add the background chunk at the beginning
690
691 chunks.add();
692 header_inf.add();
693 for(int i = 0 ; i < dim ; i++)
694 {header_inf.last().pos.set_d(i,std::numeric_limits<long int>::min());};
695 header_inf.last().nele = 0;
696 header_mask.add();
697
698 // set the mask to null
699 auto & h = header_mask.last().mask;
700
701 for (size_t i = 0 ; i < chunking::size::value ; i++)
702 {h[i] = 0;}
703
704 // set the data to background
705 for (size_t i = 0 ; i < chunking::size::value ; i++)
706 {
707 auto c = chunks.get(0);
708
709 //copy_bck<decltype(background),decltype(c)> cb(background,c,i);
710
711 //boost::mpl::for_each_ref<boost::mpl::range_c<int,0,T::max_prop>>(cb);
712 }
713 }
714
	/*! \brief Given a (shifted) chunk key return the chunk that contains it; in case the chunk does not exist return the
	 * background chunk
	 *
	 * \param kh chunk coordinates to search
	 * \param active_cnk returned active chunk
	 * \param exist set to false if the chunk does not exist
721 *
722 */
723 inline void find_active_chunk(const grid_key_dx<dim> & kh,size_t & active_cnk,bool & exist) const
724 {
725 long int lin_id = g_sm_shift.LinId(kh);
726
727 size_t id = 0;
728 for (size_t k = 0 ; k < SGRID_CACHE; k++)
729 {id += (cache[k] == lin_id)?k+1:0;}
730
731 if (id == 0)
732 {
733 // we do not have it in cache we check if we have it in the map
734
735 auto fnd = map.find(lin_id);
736 if (fnd == map.end())
737 {
738 exist = false;
739 active_cnk = 0;
740 return;
741 }
742 else
743 {active_cnk = fnd->second;}
744
745 // Add on cache the chunk
746 cache[cache_pnt] = lin_id;
747 cached_id[cache_pnt] = active_cnk;
748 cache_pnt++;
749 cache_pnt = (cache_pnt >= SGRID_CACHE)?0:cache_pnt;
750 }
751 else
752 {
753 active_cnk = cached_id[id-1];
754 cache_pnt = id;
755 cache_pnt = (cache_pnt == SGRID_CACHE)?0:cache_pnt;
756 }
757
758 exist = true;
759 }
760
	/*! \brief Given a key v1 in coordinates it calculates the chunk position and the position inside the chunk
	 *
	 * \param v1 coordinates
	 * \param active_cnk chunk position
	 * \param sub_id element id
	 * \param exist set to false if the chunk does not exist
766 *
767 */
768 inline void pre_get(const grid_key_dx<dim> & v1, size_t & active_cnk, size_t & sub_id, bool & exist) const
769 {
770 grid_key_dx<dim> kh = v1;
771 grid_key_dx<dim> kl;
772
773 // shift the key
774 key_shift<dim,chunking>::shift(kh,kl);
775
776 find_active_chunk(kh,active_cnk,exist);
777
778 sub_id = sublin<dim,typename chunking::shift_c>::lin(kl);
779 }
780
	/*! \brief Before inserting data you have to call this
	 *
	 * \param v1 grid key where you want to insert data
	 * \param active_cnk output chunk that is competent for this data
	 * \param sub_id element inside the chunk
	 *
	 * \return true if the element already existed
787 */
788 inline bool pre_insert(const grid_key_dx<dim> & v1, size_t & active_cnk, size_t & sub_id)
789 {
790 bool exist = true;
791 active_cnk = 0;
792
793 grid_key_dx<dim> kh = v1;
794 grid_key_dx<dim> kl;
795
796 // shift the key
797 key_shift<dim,chunking>::shift(kh,kl);
798
799 long int lin_id = g_sm_shift.LinId(kh);
800
801 size_t id = 0;
802 for (size_t k = 0 ; k < SGRID_CACHE; k++)
803 {id += (cache[k] == lin_id)?k+1:0;}
804
805 if (id == 0)
806 {
807 // we do not have it in cache we check if we have it in the map
808
809 auto fnd = map.find(lin_id);
810 if (fnd == map.end())
811 {
812 // we do not have it in the map create a chunk
813
814 map[lin_id] = chunks.size();
815 chunks.add();
816 header_inf.add();
817 header_inf.last().pos = kh;
818 header_inf.last().nele = 0;
819 header_mask.add();
820
821 // set the mask to null
822 auto & h = header_mask.last().mask;
823
824 for (size_t i = 0 ; i < chunking::size::value ; i++)
825 {h[i] = 0;}
826
827 key_shift<dim,chunking>::cpos(header_inf.last().pos);
828
829 active_cnk = chunks.size() - 1;
830 }
831 else
832 {
833 // we have it in the map
834
835 active_cnk = fnd->second;
836 }
837
838 // Add on cache the chunk
839 cache[cache_pnt] = lin_id;
840 cached_id[cache_pnt] = active_cnk;
841 cache_pnt++;
842 cache_pnt = (cache_pnt >= SGRID_CACHE)?0:cache_pnt;
843 }
844 else
845 {
846 active_cnk = cached_id[id-1];
847 cache_pnt = id;
848 cache_pnt = (cache_pnt == SGRID_CACHE)?0:cache_pnt;
849 }
850
851 sub_id = sublin<dim,typename chunking::shift_c>::lin(kl);
852
853 // the chunk is in cache, solve
854
855 // we notify that we added one element
856 auto & hc = header_inf.get(active_cnk);
857 auto & hm = header_mask.get(active_cnk);
858
859 exist = hm.mask[sub_id];
860 hc.nele = (exist)?hc.nele:hc.nele + 1;
861 hm.mask[sub_id] |= 1;
862
863 return exist;
864 }
865
866 inline void remove_point(const grid_key_dx<dim> & v1)
867 {
868 bool exist;
869 size_t active_cnk = 0;
870 size_t sub_id;
871
872 pre_get(v1,active_cnk,sub_id,exist);
873
874 if (exist == false)
875 {return;}
876
877 // eliminate the element
878
879 auto & hm = header_mask.get(active_cnk);
880 auto & hc = header_inf.get(active_cnk);
881 unsigned char swt = hm.mask[sub_id];
882
883 hc.nele = (swt)?hc.nele-1:hc.nele;
884
885 hm.mask[sub_id] = 0;
886
887 if (hc.nele == 0 && swt != 0)
888 {
889 // Add the chunks in the empty list
890 empty_v.add(active_cnk);
891 }
892 }
893
894public:
895
	//! it defines that this data-structure is a grid
897 typedef int yes_i_am_grid;
898
899 //! base_key for the grid
900 typedef grid_key_dx<dim> base_key;
901
	//! expose the dimensionality as a static const
903 static constexpr unsigned int dims = dim;
904
	//! The object type the grid is storing
907 typedef T value_type;
908
909 //! sub-grid iterator type
910 typedef grid_key_sparse_dx_iterator_sub<dim, chunking::size::value> sub_grid_iterator_type;
911
912 //! Background type
913 typedef aggregate_bfv<chunk_def> background_type;
914
915 typedef layout_base<T> memory_traits;
916
917 typedef chunking chunking_type;
918
919 typedef grid_lin linearizer_type;
920
921 /*! \brief Trivial constructor
922 *
923 *
924 */
925 inline sgrid_cpu()
926 :cache_pnt(0)
927 {
928 init();
929 }
930
931 /*! \brief create a sparse grid from another grid
932 *
933 * \param g the grid to copy
934 *
935 */
936 inline sgrid_cpu(const sgrid_cpu & g) THROW
937 {
938 this->operator=(g);
939 }
940
	/*! \brief create a sparse grid moving another grid
	 *
	 * \param g the grid to move
944 *
945 */
946 inline sgrid_cpu(const sgrid_cpu && g) THROW
947 {
948 this->operator=(g);
949 }
950
951 /*! \brief Constructor for sparse grid
952 *
953 * \param sz size in each direction of the sparse grid
954 *
955 */
956 sgrid_cpu(const size_t (& sz)[dim])
957 :cache_pnt(0),g_sm(sz)
958 {
959 // calculate the chunks grid
960
961 set_g_shift_from_size(sz,g_sm_shift);
962
		// fill the chunk helper structures
964
965 init();
966 }
967
968 /*! \brief Set the background value for the property p
969 *
	 * \param val background value to set
971 *
972 */
973 template<unsigned int p>
974 void setBackgroundValue(const typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type & val)
975 {
976 for (int i = 0 ; i < chunking::size::value ; i++)
977 {meta_copy<typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type>::meta_copy_(val,chunks.get(0).template get<p>()[i]);}
978 }
979
980 /*! \brief Get the background value
981 *
982 * \return background value
983 *
984 */
985 sparse_grid_bck_value<typename std::remove_reference<decltype(chunks.get(0))>::type> getBackgroundValue()
986 {
987 return sparse_grid_bck_value<typename std::remove_reference<decltype(chunks.get(0))>::type>(chunks.get(0));
988 }
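
	/* A minimal usage sketch for the background-value API above. It assumes the remaining
	 * template parameters of sgrid_cpu take their defaults (as in the OpenFPM unit tests)
	 * and a single scalar property; any point that was never inserted reads as the background.
	 *
	 * \code
	 * size_t sz[3] = {128,128,128};
	 * sgrid_cpu<3,aggregate<double>,HeapMemory> grid(sz);
	 *
	 * grid.setBackgroundValue<0>(0.0);     // every non-inserted point reads as 0.0
	 * auto bck = grid.getBackgroundValue();
	 * double b = bck.get<0>();             // b == 0.0
	 * \endcode
	 */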
989
990 /*! \brief Stub does not do anything
991 *
992 */
993 template<typename pointers_type,
994 typename headers_type,
995 typename result_type,
996 unsigned int ... prp >
997 static void unpack_headers(pointers_type & pointers, headers_type & headers, result_type & result, int n_slot)
998 {}
999
1000 template<unsigned int ... prp, typename S2, typename header_type, typename ite_type, typename context_type>
1001 void unpack_with_headers(ExtPreAlloc<S2> & mem,
1002 ite_type & sub_it,
1003 header_type & headers,
1004 int ih,
1005 Unpack_stat & ps,
1006 context_type &context,
1007 rem_copy_opt opt = rem_copy_opt::NONE_OPT)
1008 {}
1009
	/*! \brief Get the background value aggregate
1011 *
1012 * \return background value
1013 *
1014 */
1015 auto getBackgroundValueAggr() -> decltype(chunks.get(0))
1016 {
1017 return chunks.get(0);
1018 }
1019
	/*! \brief This is a sparse grid, so it is stored in a compressed format
1021 *
1022 * \return true
1023 *
1024 */
1025 static constexpr bool isCompressed()
1026 {
1027 return true;
1028 }
1029
	/*! \brief Insert a full element (with all properties)
	 *
	 * \param v1 where you want to insert the element
	 * \param ele_id output element position inside the returned chunk
	 *
	 * \return the encapsulated chunk containing the inserted element
1034 */
1035 inline auto insert_o(const grid_key_dx<dim> & v1, size_t & ele_id) -> decltype(chunks.get_o(0))
1036 {
1037 size_t active_cnk;
1038
1039 pre_insert(v1,active_cnk,ele_id);
1040
1041 return chunks.get_o(active_cnk);
1042 }
1043
	/*! \brief Get the reference of the selected element, inserting it if it does not exist
1045 *
1046 * \param v1 grid_key that identify the element in the grid
1047 *
1048 * \return the reference of the element
1049 *
1050 */
1051 template <unsigned int p, typename r_type=decltype(get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get<p>(chunks,0,0))>
1052 inline r_type insert(const grid_key_dx<dim> & v1)
1053 {
1054 size_t active_cnk = 0;
1055 size_t ele_id = 0;
1056
1057 pre_insert(v1,active_cnk,ele_id);
1058
1059 return get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get<p>(chunks,active_cnk,ele_id);
1060 }
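
	/* A minimal insertion sketch for the method above, assuming a grid declared as
	 * sgrid_cpu<3,aggregate<double>,HeapMemory> (defaults for the remaining template parameters):
	 *
	 * \code
	 * grid_key_dx<3> key(10,10,10);
	 *
	 * grid.insert<0>(key) = 1.5;   // creates the chunk/point if missing and writes property 0
	 * \endcode
	 */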
1061
	/*! \brief Get the reference of the selected element, inserting it if it does not exist
1063 *
1064 * \param v1 grid_key that identify the element in the grid
1065 *
1066 * \return the reference of the element
1067 *
1068 */
1069 template <unsigned int p, typename r_type=decltype(get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get<p>(chunks,0,0))>
1070 inline r_type insert(const grid_key_sparse_lin_dx & v1)
1071 {
1072 size_t active_cnk = v1.getChunk();
1073 size_t sub_id = v1.getPos();
1074
1075 // the chunk is in cache, solve
1076
1077 // we notify that we added one element
1078 auto & hm = header_mask.get(active_cnk);
1079 auto & hc = header_inf.get(active_cnk);
1080
1081 // we set the mask
1082 hc.nele = (hm.mask[sub_id] & 1)?hc.nele:hc.nele + 1;
1083 hm.mask[sub_id] |= 1;
1084
1085 return get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get<p>(chunks,active_cnk,sub_id);
1086 }
1087
1088 /*! \brief Get the reference of the selected element
1089 *
1090 * \param v1 grid_key that identify the element in the grid
1091 *
1092 * \return the reference of the element
1093 *
1094 */
1095 template <unsigned int p>
1096 inline auto get(const grid_key_dx<dim> & v1) const -> decltype(get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get_const<p>(chunks,0,0))
1097 {
1098 bool exist;
1099 size_t active_cnk;
1100 size_t sub_id;
1101
1102 pre_get(v1,active_cnk,sub_id,exist);
1103
1104 if (exist == false)
1105 {return get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get_const<p>(chunks,0,sub_id);}
1106
1107 // we check the mask
1108 auto & hm = header_mask.get(active_cnk);
1109
1110 if ((hm.mask[sub_id] & 1) == 0)
1111 {return get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get_const<p>(chunks,0,sub_id);}
1112
1113 return get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get_const<p>(chunks,active_cnk,sub_id);
1114 }
1115
	/*! \brief Indicate whether unpacking the header is supported
1117 *
1118 * \return false
1119 *
1120 */
1121 static bool is_unpack_header_supported()
1122 {return false;}
1123
1124 /*! \brief Get the reference of the selected element
1125 *
1126 * \param v1 grid_key that identify the element in the grid
1127 *
1128 * \return the reference of the element
1129 *
1130 */
1131 template <unsigned int p>
1132 inline auto get(const grid_key_dx<dim> & v1) -> decltype(get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get_const<p>(chunks,0,0))
1133 {
1134 bool exist;
1135 size_t active_cnk;
1136 size_t sub_id;
1137
1138 pre_get(v1,active_cnk,sub_id,exist);
1139
1140 if (exist == false)
1141 {return get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get_const<p>(chunks,active_cnk,sub_id);}
1142
1143 // we check the mask
1144 auto & hc = header_inf.get(active_cnk);
1145 auto & hm = header_mask.get(active_cnk);
1146
1147 if ((hm.mask[sub_id] & 1) == 0)
1148 {return get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get_const<p>(chunks,0,sub_id);}
1149
1150 return get_selector< typename boost::mpl::at<typename T::type,boost::mpl::int_<p>>::type >::template get_const<p>(chunks,active_cnk,sub_id);
1151 }
1152
1153 /*! \brief Check if the point exist
1154 *
1155 * \param v1 grid_key that identify the element in the grid
1156 *
	 * \return true if the point exists
1158 *
1159 */
1160 inline bool existPoint(const grid_key_dx<dim> & v1) const
1161 {
1162 bool exist;
1163 size_t active_cnk;
1164 size_t sub_id;
1165
1166 pre_get(v1,active_cnk,sub_id,exist);
1167
1168 if (exist == false)
1169 {return false;}
1170
1171 // we check the mask
1172 auto & hm = header_mask.get(active_cnk);
1173
1174 if ((hm.mask[sub_id] & 1) == 0)
1175 {return false;}
1176
1177 return true;
1178 }
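
	/* A minimal read sketch combining existPoint() and get<p>(), under the same assumptions
	 * as the insertion example above. Reading a point that does not exist returns the
	 * background value instead of creating it.
	 *
	 * \code
	 * grid_key_dx<3> key(10,10,10);
	 *
	 * double v = grid.get<0>(key);            // inserted value, or the background if missing
	 * bool present = grid.existPoint(key);    // true only if the point was actually inserted
	 * \endcode
	 */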
1179
1180 /*! \brief Get the reference of the selected element
1181 *
1182 * \param v1 grid_key that identify the element in the grid
1183 *
1184 * \return the reference of the element
1185 *
1186 */
1187 template <unsigned int p>
1188 inline auto get(const grid_key_sparse_lin_dx & v1) -> decltype(chunks.template get<p>(0)[0])
1189 {
1190 return chunks.template get<p>(v1.getChunk())[v1.getPos()];
1191 }
1192
1193 /*! \brief Get the reference of the selected block
1194 *
1195 * \param v1 grid_key that identify the element in the grid
1196 *
1197 * \return the reference of the element
1198 *
1199 */
1200 inline auto getBlock(const grid_key_sparse_lin_dx & v1) const -> decltype(chunks.get(0))
1201 {
1202 return chunks.get(v1.getChunk());
1203 }
1204
	/*! \brief Get the point flag (in this case it always returns 0)
1206 *
1207 * \param v1 grid_key that identify the element in the grid
1208 *
1209 * \return 0
1210 *
1211 */
1212 inline unsigned char getFlag(const grid_key_dx<dim> & v1) const
1213 {
1214 return 0;
1215 }
1216
1217 /*! \brief Return a Domain iterator
1218 *
1219 * \return return the domain iterator
1220 *
1221 */
1222 grid_key_sparse_dx_iterator<dim,chunking::size::value>
1223 getIterator(size_t opt = 0) const
1224 {
1225 return grid_key_sparse_dx_iterator<dim,chunking::size::value>(&header_mask,&header_inf,&pos_chunk);
1226 }
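
	/* A minimal sketch of visiting all inserted points with the domain iterator above;
	 * the isNext()/get()/++ pattern is the same one copy_to() uses further down.
	 *
	 * \code
	 * auto it = grid.getIterator();
	 *
	 * while (it.isNext())
	 * {
	 *     auto key = it.get();           // grid coordinates of an inserted point
	 *
	 *     grid.get<0>(key) += 1.0;       // update property 0 of that point
	 *
	 *     ++it;
	 * }
	 * \endcode
	 */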
1227
1228 /*! \brief Return an iterator over a sub-grid
1229 *
1230 * \return return an iterator over a sub-grid
1231 *
1232 */
1233 grid_key_sparse_dx_iterator_sub<dim,chunking::size::value>
1234 getIterator(const grid_key_dx<dim> & start, const grid_key_dx<dim> & stop, size_t opt = 0) const
1235 {
1236 return grid_key_sparse_dx_iterator_sub<dim,chunking::size::value>(header_mask,header_inf,pos_chunk,start,stop,sz_cnk);
1237 }
1238
1239 /*! \brief Return an iterator over a sub-grid
1240 *
	 * \tparam stencil_size size of the stencil
1242 * \param start point
1243 * \param stop point
1244 *
1245 * \return an iterator over sub-grid blocks
1246 *
1247 */
1248 template<unsigned int stencil_size = 0>
1249 grid_key_sparse_dx_iterator_block_sub<dim,stencil_size,self,chunking>
1250 getBlockIterator(const grid_key_dx<dim> & start, const grid_key_dx<dim> & stop)
1251 {
1252 return grid_key_sparse_dx_iterator_block_sub<dim,stencil_size,self,chunking>(*this,start,stop);
1253 }
1254
1255 /*! \brief Return the internal grid information
1256 *
1257 * Return the internal grid information
1258 *
1259 * \return the internal grid
1260 *
1261 */
1262 const grid_lin & getGrid() const
1263 {
1264 return g_sm;
1265 }
1266
1267 /*! \brief Remove the point
1268 *
1269 * \param v1 element to remove
1270 *
1271 */
1272 void remove(const grid_key_dx<dim> & v1)
1273 {
1274 remove_point(v1);
1275 remove_empty();
1276 }
1277
1278 /*! \brief Remove the point but do not flush the remove
1279 *
1280 * \param v1 element to remove
1281 *
1282 */
1283 void remove_no_flush(const grid_key_dx<dim> & v1)
1284 {
1285 remove_point(v1);
1286 }
1287
1288 /*! \brief Expand and tag boundaries
1289 *
	 * \tparam stencil_type type of boundaries
1291 * \param start starting point where we tag the boundaries
1292 * \param stop point where we tag the boundaries
1293 *
1294 */
1295 template<typename stencil_type>
1296 void expandAndTagBoundaries(grid_key_dx<dim> & start, grid_key_dx<dim> & stop)
1297 {
1298
1299 }
1300
	/*! \brief Flush the pending removals, eliminating the chunks that became empty
	 *
1305 */
1306 void flush_remove()
1307 {
1308 remove_empty();
1309 }
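
	/* A minimal removal sketch: remove_no_flush() can be called many times and the more
	 * expensive cleanup of emptied chunks is deferred to a single flush_remove() call
	 * (remove() above does both steps at once).
	 *
	 * \code
	 * grid_key_dx<3> p1(10,10,10);
	 * grid_key_dx<3> p2(11,10,10);
	 *
	 * grid.remove_no_flush(p1);
	 * grid.remove_no_flush(p2);
	 * grid.flush_remove();          // eliminates the chunks that became empty
	 * \endcode
	 */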
1310
1311 /*! \brief Resize the grid
1312 *
	 * The old information is retained in the new grid if the new grid is bigger;
	 * when it is smaller the data are cropped
1315 *
1316 * \param sz reference to an array of dimension dim
1317 *
1318 */
1319 void resize(const size_t (& sz)[dim])
1320 {
1321 bool is_bigger = true;
1322
		// we check if we are resizing bigger, because if that is the case we do not have to do
		// much
1325
1326 for (size_t i = 0 ; i < dim ; i++)
1327 {
1328 if (sz[i] < g_sm.size(i))
1329 {is_bigger = false;}
1330 }
1331
1332 g_sm.setDimensions(sz);
1333
1334 // set g_sm_shift
1335
1336 set_g_shift_from_size(sz,g_sm_shift);
1337
1338 clear_cache();
1339
1340 if (is_bigger == true)
1341 {
1342
1343 // if we resize bigger we do not have to do anything in the headers
1344 // and in the chunks we just have to update g_sm and reset the cache
1345 // and reconstruct the map. So we reconstruct the map and we just
1346 // finish
1347
1348 reconstruct_map();
1349
1350 return;
1351 }
1352
1353 // create a box that is as big as the grid
1354
1355 Box<dim,size_t> gs_box;
1356
1357 for (size_t i = 0 ; i < dim ; i++)
1358 {
1359 gs_box.setLow(i,0);
1360 gs_box.setHigh(i,g_sm.size(i));
1361 }
1362
1363 // we take a list of all chunks to remove
1364 openfpm::vector<size_t> rmh;
1365
1366 // in this case we have to crop data, we go through all the headers
1367
1368 for (size_t i = 1 ; i < header_inf.size() ; i++)
1369 {
1370 Box<dim,size_t> cnk;
1371
1372 for (size_t j = 0 ; j < dim ; j++)
1373 {
1374 cnk.setLow(j,header_inf.get(i).pos.get(j));
1375 cnk.setHigh(j,sz_cnk[j] + header_inf.get(i).pos.get(j));
1376 }
1377
1378 // if the chunk is not fully contained in the new smaller sparse grid
1379 // we have to crop it
1380 if (!cnk.isContained(gs_box))
1381 {
				// We check if the chunk is fully out or only partially in by checking
				// the intersection between the new grid and the box
				// enclosing the chunk as it was before
1385 Box<dim,size_t> inte;
1386
1387 if (gs_box.Intersect(cnk,inte))
1388 {
1389 // part of the chunk is in, part is out
1390
1391 // shift P1 to the origin
					// this is the valid box, everything outside must be reset
1393 inte -= inte.getP1();
1394
1395 int mask_nele;
1396 short unsigned int mask_it[chunking::size::value];
1397
1398 auto & mask = header_mask.get(i).mask;
1399 auto & n_ele = header_inf.get(i).nele;
1400
1401 // ok so the box is not fully contained so we must crop data
1402
1403 fill_mask(mask_it,mask,mask_nele);
1404
1405 // now we have the mask of all the filled elements
1406
1407 for (size_t j = 0 ; j < mask_nele ; j++)
1408 {
1409 if (!inte.isInside(pos_chunk[mask_it[j]].toPoint()))
1410 {
1411 // if is not inside, the point must be deleted
1412
1413 remove_from_chunk<chunking::size::value>(mask_it[j],n_ele,mask);
1414 }
1415 }
1416 }
1417 else
1418 {
1419 // the chunk is completely out and must be removed completely
1420 // we add it to the list of the chunks to remove
1421
1422 rmh.add(i);
1423 }
1424 }
1425 }
1426
1427 header_inf.remove(rmh,0);
1428 header_mask.remove(rmh,0);
1429 chunks.remove(rmh,0);
1430
1431 reconstruct_map();
1432 }
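
	/* A minimal resize sketch for the method above: growing keeps all inserted points,
	 * shrinking crops the points that fall outside the new extent.
	 *
	 * \code
	 * size_t sz_big[3] = {256,256,256};
	 * grid.resize(sz_big);       // previously inserted points are preserved
	 *
	 * size_t sz_small[3] = {64,64,64};
	 * grid.resize(sz_small);     // points outside [0,63]^3 are removed
	 * \endcode
	 */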
1433
1434 /*! \brief Calculate the memory size required to pack n elements
1435 *
1436 * Calculate the total size required to store n-elements in a vector
1437 *
1438 * \param n number of elements
1439 * \param e unused
1440 *
	 * \return the size required to pack n elements
1442 *
1443 */
1444 template<int ... prp> static inline size_t packMem(size_t n, size_t e)
1445 {
1446 if (sizeof...(prp) == 0)
1447 {return n * sizeof(typename T::type);}
1448
1449 typedef object<typename object_creator<typename T::type,prp...>::type> prp_object;
1450
1451 return n * sizeof(prp_object);
1452 }
1453
1454 /*! \brief Reset the pack calculation
1455 *
1456 * \note in this case it does nothing
1457 *
1458 */
1459 void packReset()
1460 {}
1461
1462 /*! \brief Calculate the size of the information to pack
1463 *
	 * \note in this case it does nothing
	 *
	 * \param req output size
	 * \param context gpu context
1468 *
1469 */
1470 template<int ... prp, typename context_type> inline
1471 void packCalculate(size_t & req, const context_type & context)
1472 {}
1473
	/*! \brief Insert an allocation request
	 *
	 * \tparam prp set of properties to pack
	 *
	 * \param req size required to pack (accumulated)
	 *
1482 */
1483 template<int ... prp> inline
1484 void packRequest(size_t & req) const
1485 {
1486 grid_sm<dim,void> gs_cnk(sz_cnk);
1487
		// For sure we have to pack the number of chunks we want to pack
1489
1490 req += sizeof(size_t);
1491 req += dim*sizeof(size_t);
1492
1493 // Here we have to calculate the number of points to pack (skip the background)
1494
1495 for (size_t i = 1 ; i < header_inf.size() ; i++)
1496 {
1497 auto & hm = header_mask.get(i);
1498
1499 int mask_nele;
1500 short unsigned int mask_it[chunking::size::value];
1501
1502 fill_mask(mask_it,hm.mask,mask_nele);
1503
1504 for (size_t j = 0 ; j < mask_nele ; j++)
1505 {
1506 // If all of the aggregate properties do not have a "pack()" member
1507 if (has_pack_agg<T,prp...>::result::value == false)
1508 {
				// here we count how many bytes must be sent for this point
1510
1511 size_t alloc_ele = this->packMem<prp...>(1,0);
1512 req += alloc_ele;
1513 }
1514 else
1515 {
1516 //Call a pack request
1517 call_aggregatePackRequestChunking<decltype(chunks.get_o(i)),
1518 S,prp ... >
1519 ::call_packRequest(chunks.get_o(i),mask_it[j],req);
1520 }
1521 }
1522
			// There are points to send, so we have to save the chunk mask
1524 req += sizeof(header_mask.get(i).mask);
1525 // the chunk position
1526 req += sizeof(header_inf.get(i).pos);
1527 // and the number of element
1528 req += sizeof(header_inf.get(i).nele);
1529 }
1530 }
1531
1532 /*! \brief Reset the queue to remove and copy section of grids
1533 *
1534 * \note for this particular implementation it does nothing
1535 *
1536 */
1537 void copyRemoveReset()
1538 {
1539 }
1540
	/*! \brief Insert an allocation request
	 *
	 * \tparam prp set of properties to pack
	 *
	 * \param sub_it sub-grid iterator to pack
	 * \param req size required to pack (accumulated)
1548 *
1549 */
1550 template<int ... prp> inline
1551 void packRequest(grid_key_sparse_dx_iterator_sub<dim,chunking::size::value> & sub_it,
1552 size_t & req) const
1553 {
1554 grid_sm<dim,void> gs_cnk(sz_cnk);
1555
		// For sure we have to pack the number of chunks we want to pack
1557
1558 req += sizeof(size_t);
1559 req += dim*sizeof(size_t);
1560
1561 // Here we have to calculate the number of points to pack
1562
1563 Box<dim,size_t> section_to_pack;
1564
1565 for (size_t i = 0; i < dim ; i++)
1566 {
1567 section_to_pack.setLow(i,sub_it.getStart().get(i));
1568 section_to_pack.setHigh(i,sub_it.getStop().get(i));
1569 }
1570
1571 for (size_t i = 0 ; i < header_inf.size() ; i++)
1572 {
1573 auto & hm = header_mask.get(i);
1574
1575 Box<dim,size_t> bc;
1576
1577 for (size_t j = 0 ; j < dim ; j++)
1578 {
1579 bc.setLow(j,header_inf.get(i).pos.get(j));
1580 bc.setHigh(j,header_inf.get(i).pos.get(j) + sz_cnk[j] - 1);
1581 }
1582
1583 // now we intersect the chunk box with the box
1584
1585 Box<dim,size_t> inte;
1586 bool stp = bc.Intersect(section_to_pack,inte);
1587
1588 if (stp == true)
1589 {
				// If they intersect we have to check if there are points to pack;
				// we shift inte to be relative to the chunk origin
1592
1593 inte -= header_inf.get(i).pos.toPoint();
1594
1595 // we iterate all the points
1596
1597 size_t old_req = req;
1598 grid_key_dx_iterator_sub<dim,no_stencil,grid_sm<dim,void>> sit(gs_cnk,inte.getKP1(),inte.getKP2());
1599
1600 while (sit.isNext())
1601 {
1602 auto key = sit.get();
1603
1604 size_t sub_id = gs_cnk.LinId(key);
1605
1606 if (hm.mask[sub_id] & 1)
1607 {
1608 // If all of the aggregate properties do not have a "pack()" member
1609 if (has_pack_agg<T,prp...>::result::value == false)
1610 {
						// here we count how many bytes must be sent for this point
1612
1613 size_t alloc_ele = this->packMem<prp...>(1,0);
1614 req += alloc_ele;
1615 }
1616 //If at least one property has "pack()"
1617 else
1618 {
1619 //Call a pack request
1620 call_aggregatePackRequestChunking<decltype(chunks.get_o(i)),
1621 S,prp ... >
1622 ::call_packRequest(chunks.get_o(i),sub_id,req);
1623 }
1624 }
1625
1626 ++sit;
1627 }
1628
1629 if (old_req != req)
1630 {
					// There are points to send, so we have to save the chunk mask
1632 req += sizeof(header_mask.get(i));
1633 // the chunk position
1634 req += sizeof(header_inf.get(i).pos);
1635 // and the number of element
1636 req += sizeof(header_inf.get(i).nele);
1637 }
1638 }
1639 }
1640 }
1641
1642 /*! \brief Pack the object into the memory given an iterator
1643 *
1644 * \tparam prp properties to pack
1645 *
1646 * \param mem preallocated memory where to pack the objects
1647 * \param sub_it sub grid iterator ( or the elements in the grid to pack )
1648 * \param sts pack statistic
1649 *
1650 */
1651 template<int ... prp> void pack(ExtPreAlloc<S> & mem,
1652 grid_key_sparse_dx_iterator_sub<dims,chunking::size::value> & sub_it,
1653 Pack_stat & sts)
1654 {
1655 grid_sm<dim,void> gs_cnk(sz_cnk);
1656
		// Here we allocate a size_t that indicates the number of chunks we are packing;
		// because we do not know it a priori, we will fill it later
1659
1660 mem.allocate(sizeof(size_t));
1661 size_t * number_of_chunks = (size_t *)mem.getPointer();
1662
1663 // Pack the size of the grid
1664
1665 for (size_t i = 0 ; i < dim ; i++)
1666 {Packer<size_t,S>::pack(mem,getGrid().size(i),sts);}
1667
1668 // Here we have to calculate the number of points to pack
1669
1670 Box<dim,size_t> section_to_pack;
1671
1672 for (size_t i = 0; i < dim ; i++)
1673 {
1674 section_to_pack.setLow(i,sub_it.getStart().get(i));
1675 section_to_pack.setHigh(i,sub_it.getStop().get(i));
1676 }
1677
1678 size_t n_packed_chunk = 0;
1679
1680 for (size_t i = 0 ; i < header_inf.size() ; i++)
1681 {
1682 auto & hc = header_inf.get(i);
1683 auto & hm = header_mask.get(i);
1684
1685 Box<dim,size_t> bc;
1686
1687 for (size_t j = 0 ; j < dim ; j++)
1688 {
1689 bc.setLow(j,hc.pos.get(j));
1690 bc.setHigh(j,hc.pos.get(j) + sz_cnk[j] - 1);
1691 }
1692
1693 // now we intersect the chunk box with the box
1694
1695 Box<dim,size_t> inte;
1696 bool stp = bc.Intersect(section_to_pack,inte);
1697
1698 if (stp == true)
1699 {
1700 // This flag indicate if something has been packed from this chunk
1701 bool has_packed = false;
1702
1703 unsigned char mask_to_pack[chunking::size::value];
1704 memset(mask_to_pack,0,sizeof(mask_to_pack));
1705 mem.allocate_nocheck(sizeof(header_mask.get(i)) + sizeof(header_inf.get(i).pos) + sizeof(header_inf.get(i).nele));
1706
1707 // here we get the pointer of the memory in case we have to pack the header
1708 // and we also shift the memory pointer by an offset equal to the header
1709 // to pack
1710 unsigned char * ptr_start = (unsigned char *)mem.getPointer();
1711
				// If they intersect we have to check if there are points to pack;
				// we shift inte into the chunk origin
1714
1715 inte -= hc.pos.toPoint();
1716
1717 // we iterate all the points
1718
1719 grid_key_dx_iterator_sub<dim,no_stencil,grid_sm<dim,void>> sit(gs_cnk,inte.getKP1(),inte.getKP2());
1720
1721 while (sit.isNext())
1722 {
1723 auto key = sit.get();
1724
1725 size_t sub_id = gs_cnk.LinId(key);
1726
1727 if (hm.mask[sub_id] & 1)
1728 {
1729 Packer<decltype(chunks.get_o(i)),
1730 S,
1731 PACKER_ENCAP_OBJECTS_CHUNKING>::template pack<T,prp...>(mem,chunks.get_o(i),sub_id,sts);
1732
1733 mask_to_pack[sub_id] |= 1;
1734 has_packed = true;
1735
1736 }
1737
1738 ++sit;
1739 }
1740
1741 if (has_packed == true)
1742 {
1743 unsigned char * ptr_final = (unsigned char *)mem.getPointer();
1744 unsigned char * ptr_final_for = (unsigned char *)mem.getPointerEnd();
1745
1746 // Ok we packed something so we have to pack the header
1747 size_t shift = ptr_final - ptr_start;
1748
1749 mem.shift_backward(shift);
1750
1751 // The position of the chunks
1752
1753 grid_key_dx<dim> pos = header_inf.get(i).pos - sub_it.getStart();
1754
1755 Packer<decltype(header_mask.get(i).mask),S>::pack(mem,mask_to_pack,sts);
1756 Packer<decltype(header_inf.get(i).pos),S>::pack(mem,pos,sts);
1757 Packer<decltype(header_inf.get(i).nele),S>::pack(mem,header_inf.get(i).nele,sts);
1758
1759 size_t shift_for = ptr_final_for - (unsigned char *)mem.getPointer();
1760
1761 mem.shift_forward(shift_for);
1762
1763 n_packed_chunk++;
1764 }
1765 else
1766 {
1767 // This just reset the last allocation
1768 mem.shift_backward(0);
1769 }
1770 }
1771 }
1772
1773 // Now we fill the number of packed chunks
1774 *number_of_chunks = n_packed_chunk;
1775 }
1776
1777 /*! \brief In this case it does nothing
1778 *
1779 * \note this function exist to respect the interface to work as distributed
1780 *
1781 */
1782 void removeAddUnpackReset()
1783 {}
1784
1785 /*! \brief It does nothing
1786 *
1787 *
1788 */
1789 void resetFlush()
1790 {}
1791
1792 /*! \brief In this case it does nothing
1793 *
1794 * \note this function exist to respect the interface to work as distributed
1795 *
1796 * \param ctx context
1797 *
1798 */
1799 template<unsigned int ... prp, typename context_type>
1800 void removeAddUnpackFinalize(const context_type & ctx, int opt)
1801 {}
1802
1803
1804 /*! \brief In this case it does nothing
1805 *
1806 * \note this function exist to respect the interface to work as distributed
1807 *
1808 * \param ctx context
1809 *
1810 */
1811 template<unsigned int ... prp, typename context_type>
1812 void removeCopyToFinalize(const context_type & ctx, int opt)
1813 {}
1814
1815 /*! \brief This function check if keep geometry is possible for this grid
1816 *
1817 * \return false it does not exist this feature on this type of grids
1818 *
1819 */
1820 bool isSkipLabellingPossible()
1821 {
1822 return false;
1823 }
1824
	/*! \brief Finalize the pack of this object. In this case it does nothing
1826 *
1827 * \tparam prp properties to pack
1828 *
1829 * \param mem preallocated memory where to pack the objects
1830 * \param sts pack statistic
1831 * \param opt options
1832 * \param is_pack_remote indicate this pack is remote
1833 *
1834 */
1835 template<int ... prp> void packFinalize(ExtPreAlloc<S> & mem, Pack_stat & sts, int opt, bool is_pack_remote)
1836 {}
1837
	/*! \brief Pack the whole object into the memory
	 *
	 * \tparam prp properties to pack
	 *
	 * \param mem preallocated memory where to pack the objects
	 * \param sts pack statistic
1845 *
1846 */
1847 template<int ... prp> void pack(ExtPreAlloc<S> & mem,
1848 Pack_stat & sts) const
1849 {
1850 grid_sm<dim,void> gs_cnk(sz_cnk);
1851
		// Pack the number of chunks (the background chunk is skipped)
1854
1855 Packer<size_t,S>::pack(mem,header_inf.size()-1,sts);
1856
1857 for (size_t i = 0 ; i < dim ; i++)
1858 {Packer<size_t,S>::pack(mem,getGrid().size(i),sts);}
1859
1860 // Here we pack the memory (skip the first background chunk)
1861
1862 for (size_t i = 1 ; i < header_inf.size() ; i++)
1863 {
1864 auto & hm = header_mask.get(i);
1865 auto & hc = header_inf.get(i);
1866
1867 Packer<decltype(hm.mask),S>::pack(mem,hm.mask,sts);
1868 Packer<decltype(hc.pos),S>::pack(mem,hc.pos,sts);
1869 Packer<decltype(hc.nele),S>::pack(mem,hc.nele,sts);
1870
1871 // we iterate all the points
1872
1873 int mask_nele;
1874 short unsigned int mask_it[chunking::size::value];
1875
1876 fill_mask(mask_it,hm.mask,mask_nele);
1877
1878 for (size_t j = 0 ; j < mask_nele ; j++)
1879 {
1880 Packer<decltype(chunks.get_o(i)),
1881 S,
1882 PACKER_ENCAP_OBJECTS_CHUNKING>::template pack<T,prp...>(mem,chunks.get_o(i),mask_it[j],sts);
1883 };
1884 }
1885 }
1886
	/*! \brief It does nothing
1888 *
1889 */
1890 void setMemory()
1891 {}
1892
1893
	/*! \brief Number of elements inserted
	 *
	 * \warning this function is not as fast as size() in other structures
1897 *
1898 * \return the total number of elements inserted
1899 *
1900 */
1901 size_t size() const
1902 {
1903 size_t tot = 0;
1904
1905 for (size_t i = 1 ; i < header_inf.size() ; i++)
1906 {
1907 tot += header_inf.get(i).nele;
1908 }
1909
1910 return tot;
1911 }
1912
	/*! \brief Number of elements allocated across all chunks
	 *
	 * \warning this function is not as fast as size() in other structures
	 *
	 * \return the total number of allocated elements (inserted or not)
1918 *
1919 */
1920 size_t size_all() const
1921 {
1922 return header_inf.size() * vmpl_reduce_prod<typename chunking::type>::type::value;
1923 }
1924
1925 /*! \brief Remove all the points in this region
1926 *
	 * \param section_to_delete region in which the points are removed
1928 *
1929 */
1930 void remove(Box<dim,long int> & section_to_delete)
1931 {
1932 grid_sm<dim,void> gs_cnk(sz_cnk);
1933
1934 for (size_t i = 0 ; i < header_inf.size() ; i++)
1935 {
1936 auto & hm = header_mask.get(i);
1937 auto & hc = header_inf.get(i);
1938
1939 Box<dim,size_t> bc;
1940
1941 for (size_t j = 0 ; j < dim ; j++)
1942 {
1943 bc.setLow(j,hc.pos.get(j));
1944 bc.setHigh(j,hc.pos.get(j) + sz_cnk[j] - 1);
1945 }
1946
1947 // now we intersect the chunk box with the box
1948
1949 Box<dim,size_t> inte;
1950 bool stp = bc.Intersect(section_to_delete,inte);
1951
1952 if (stp == true)
1953 {
				// If they intersect we have to check if there are points to remove;
				// we shift inte into the chunk origin
1956
1957 inte -= header_inf.get(i).pos.toPoint();
1958
1959 // we iterate all the points
1960
1961 grid_key_dx_iterator_sub<dim,no_stencil,grid_sm<dim,void>> sit(gs_cnk,inte.getKP1(),inte.getKP2());
1962
1963 while (sit.isNext())
1964 {
1965 auto key = sit.get();
1966
1967 size_t sub_id = gs_cnk.LinId(key);
1968
1969 unsigned char swt = header_mask.get(i).mask[sub_id];
1970
1971 hc.nele = (swt)?hc.nele-1:hc.nele;
1972 hm.mask[sub_id] = 0;
1973
1974 if (hc.nele == 0 && swt != 0)
1975 {
1976 // Add the chunks in the empty list
1977 empty_v.add(i);
1978 }
1979
1980 ++sit;
1981 }
1982 }
1983 }
1984
1985 remove_empty();
1986 }
1987
	/*! \brief Copy a region of points from another sparse grid
	 *
	 * \param grid_src source grid
	 * \param box_src box to copy from in the source grid
	 * \param box_dst box to copy into in this grid
	 *
	 */
	void copy_to(const self & grid_src,
1989 const Box<dim,size_t> & box_src,
1990 const Box<dim,size_t> & box_dst)
1991 {
1992/* auto it = grid_src.getIterator(box_src.getKP1(),box_src.getKP2());
1993
1994 while (it.isNext())
1995 {
1996 auto key_src = it.get();
1997 grid_key_dx<dim> key_dst = key_src + box_dst.getKP1();
1998 key_dst -= box_src.getKP1();
1999
2000 typedef typename std::remove_const<typename std::remove_reference<decltype(grid_src)>::type>::type gcopy;
2001
2002 copy_sparse_to_sparse<dim,gcopy,gcopy> caps(grid_src,*this,key_src,key_dst);
2003 boost::mpl::for_each_ref< boost::mpl::range_c<int,0,T::max_prop> >(caps);
2004
2005 ++it;
2006 }*/
2007
2008 auto it = grid_src.getIterator(box_src.getKP1(),box_src.getKP2());
2009
2010 while (it.isNext())
2011 {
2012 auto key_src = it.get();
2013 grid_key_dx<dim> key_dst = key_src + box_dst.getKP1();
2014 key_dst -= box_src.getKP1();
2015 auto key_src_s = it.getKeyF();
2016
2017 typedef typename std::remove_const<typename std::remove_reference<decltype(grid_src)>::type>::type gcopy;
2018
2019 size_t pos_src_id = key_src_s.getPos();
2020 size_t pos_dst_id;
2021 auto block_src = grid_src.getBlock(key_src_s);
2022 auto block_dst = this->insert_o(key_dst,pos_dst_id);
2023
2024 copy_sparse_to_sparse_bb<dim,decltype(block_src),decltype(block_dst),T> caps(block_src,block_dst,pos_src_id,pos_dst_id);
2025 boost::mpl::for_each_ref< boost::mpl::range_c<int,0,T::max_prop> >(caps);
2026
2027 ++it;
2028 }
2029
2030// copy_remove_to_impl(grid_src,*this,box_src,box_dst);
2031 }
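
	/* A minimal sketch of copying a region between two sparse grids with copy_to() above.
	 * The brace-list Box construction is assumed to be available as in other OpenFPM code;
	 * grid_src and grid_dst are two grids of the same type.
	 *
	 * \code
	 * Box<3,size_t> box_src({0,0,0},{31,31,31});
	 * Box<3,size_t> box_dst({32,0,0},{63,31,31});
	 *
	 * grid_dst.copy_to(grid_src,box_src,box_dst);   // points of box_src appear shifted into box_dst
	 * \endcode
	 */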
2032
	/*! \brief Copy a region of points from another sparse grid, applying the operation op on pre-existing points
	 *
	 * \tparam op operation to apply
	 * \tparam prp properties to copy
	 *
	 * \param grid_src source grid
	 * \param box_src box to copy from in the source grid
	 * \param box_dst box to copy into in this grid
	 *
	 */
	template<template <typename,typename> class op, unsigned int ... prp >
2034 void copy_to_op(const self & grid_src,
2035 const Box<dim,size_t> & box_src,
2036 const Box<dim,size_t> & box_dst)
2037 {
2038 auto it = grid_src.getIterator(box_src.getKP1(),box_src.getKP2());
2039
2040 while (it.isNext())
2041 {
2042 auto key_src = it.get();
2043 grid_key_dx<dim> key_dst = key_src + box_dst.getKP1();
2044 key_dst -= box_src.getKP1();
2045
2046 typedef typename std::remove_const<typename std::remove_reference<decltype(grid_src)>::type>::type gcopy;
2047
2048 copy_sparse_to_sparse_op<op,dim,gcopy,gcopy,prp ...> caps(grid_src,*this,key_src,key_dst);
2049 boost::mpl::for_each_ref< boost::mpl::range_c<int,0,sizeof...(prp)> >(caps);
2050
2051 ++it;
2052 }
2053 }
2054
	/*! \brief Given a grid point return the chunk containing that point; in case the point does not exist return the
	 * background chunk
	 *
	 * \param v1 key
	 * \param exist set to true if the chunk exists
	 *
	 * \return the id of the chunk containing that point
2062 *
2063 */
2064 size_t getChunk(grid_key_dx<dim> & v1, bool & exist)
2065 {
2066 size_t act_cnk = chunks.size()-1;
2067
2068 find_active_chunk(v1,act_cnk,exist);
2069
2070 return act_cnk;
2071 }
2072
2073 /*! \brief Get the position of a chunk
2074 *
	 * \param chunk_id id of the chunk
2076 *
2077 * \return the position of the chunk
2078 *
2079 */
2080 grid_key_dx<dim> getChunkPos(size_t chunk_id)
2081 {
2082 grid_key_dx<dim> kl;
2083 grid_key_dx<dim> kh = header_inf.get(chunk_id).pos;
2084
2085 // shift the key
2086 key_shift<dim,chunking>::shift(kh,kl);
2087
2088 return kh;
2089 }
2090
2091 /*! \brief apply a convolution using the stencil N
2092 *
2093 *
2094 */
2095 template<unsigned int prop_src, unsigned int prop_dst, unsigned int stencil_size, unsigned int N, typename lambda_f, typename ... ArgsT >
2096 void conv(int (& stencil)[N][dim], grid_key_dx<3> start, grid_key_dx<3> stop , lambda_f func, ArgsT ... args)
2097 {
2098 NNlist.resize(NNStar_c<dim>::nNN * chunks.size());
2099
2100 if (findNN == false)
2101 {conv_impl<dim>::template conv<false,NNStar_c<dim>,prop_src,prop_dst,stencil_size>(stencil,start,stop,*this,func);}
2102 else
2103 {conv_impl<dim>::template conv<true,NNStar_c<dim>,prop_src,prop_dst,stencil_size>(stencil,start,stop,*this,func);}
2104
2105 findNN = true;
2106 }
2107
2108 /*! \brief apply a convolution from start to stop point using the function func and arguments args
2109 *
2110 * \param start point
2111 * \param stop point
2112 * \param func lambda function
2113 * \param args arguments to pass
2114 *
2115 */
2116 template<unsigned int prop_src, unsigned int prop_dst, unsigned int stencil_size, typename lambda_f, typename ... ArgsT >
2117 void conv_cross(grid_key_dx<3> start, grid_key_dx<3> stop , lambda_f func, ArgsT ... args)
2118 {
2119 NNlist.resize(2*dim * chunks.size());
2120
2121 if (findNN == false)
2122 {conv_impl<dim>::template conv_cross<false,prop_src,prop_dst,stencil_size>(start,stop,*this,func);}
2123 else
2124 {conv_impl<dim>::template conv_cross<true,prop_src,prop_dst,stencil_size>(start,stop,*this,func);}
2125
2126 findNN = true;
2127 }
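	/* Usage sketch for conv_cross (illustrative only): the cross (star) stencil is implicit, only
	 * its extent stencil_size is given as a template parameter. As for conv, the lambda signature
	 * depends on conv_impl for the chosen layout, so a variadic placeholder is used; "grid" and
	 * the property indices are assumptions of this example.
	 *
	 *	grid_key_dx<3> start(1,1,1);
	 *	grid_key_dx<3> stop(509,509,509);
	 *	grid.template conv_cross<0,1,1>(start,stop,
	 *	                          [&](auto & ... args)
	 *	                          {
	 *	                              // e.g. a Laplacian-like update of property 1 from property 0
	 *	                          });
	 */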
2128
	/*! \brief Apply a cross (star) stencil convolution from the start to the stop point
	 *         using the function func. This variant is only valid for the SOA layout
	 *
	 * \param start start point
	 * \param stop stop point
	 * \param func lambda function applied on each point
	 * \param args optional extra arguments
	 *
	 */
2137 template<unsigned int stencil_size, typename prop_type, typename lambda_f, typename ... ArgsT >
2138 void conv_cross_ids(grid_key_dx<3> start, grid_key_dx<3> stop , lambda_f func, ArgsT ... args)
2139 {
2140 if (layout_base<aggregate<int>>::type_value::value != SOA_layout_IA)
2141 {
			std::cout << __FILE__ << ":" << __LINE__ << " Error: this function can only be used with the SOA version of the data-structure" << std::endl;
2143 }
2144
2145 NNlist.resize(2*dim * chunks.size());
2146
2147 if (findNN == false)
2148 {conv_impl<dim>::template conv_cross_ids<false,stencil_size,prop_type>(start,stop,*this,func);}
2149 else
2150 {conv_impl<dim>::template conv_cross_ids<true,stencil_size,prop_type>(start,stop,*this,func);}
2151
2152 findNN = true;
2153 }
2154
	/*! \brief Apply a convolution using an explicit stencil of N points, reading two source
	 *         properties and writing two destination properties in a single sweep
	 *
	 */
2159 template<unsigned int prop_src1, unsigned int prop_src2 ,unsigned int prop_dst1, unsigned int prop_dst2 ,unsigned int stencil_size, unsigned int N, typename lambda_f, typename ... ArgsT >
2160 void conv2(int (& stencil)[N][dim], grid_key_dx<3> start, grid_key_dx<3> stop , lambda_f func, ArgsT ... args)
2161 {
2162 NNlist.resize(NNStar_c<dim>::nNN * chunks.size());
2163
2164 if (findNN == false)
2165 {conv_impl<dim>::template conv2<false,NNStar_c<dim>,prop_src1,prop_src2,prop_dst1,prop_dst2,stencil_size>(stencil,start,stop,*this,func);}
2166 else
2167 {conv_impl<dim>::template conv2<true,NNStar_c<dim>,prop_src1,prop_src2,prop_dst1,prop_dst2,stencil_size>(stencil,start,stop,*this,func);}
2168
2169 findNN = true;
2170 }
2171
	/*! \brief Apply a cross (star) stencil convolution, reading two source properties and
	 *         writing two destination properties in a single sweep
	 *
	 */
2176 template<unsigned int prop_src1, unsigned int prop_src2 ,unsigned int prop_dst1, unsigned int prop_dst2 ,unsigned int stencil_size, typename lambda_f, typename ... ArgsT >
2177 void conv_cross2(grid_key_dx<3> start, grid_key_dx<3> stop , lambda_f func, ArgsT ... args)
2178 {
2179 NNlist.resize(NNStar_c<dim>::nNN * chunks.size());
2180
2181 if (findNN == false)
2182 {conv_impl<dim>::template conv_cross2<false,prop_src1,prop_src2,prop_dst1,prop_dst2,stencil_size>(start,stop,*this,func);}
2183 else
2184 {conv_impl<dim>::template conv_cross2<true,prop_src1,prop_src2,prop_dst1,prop_dst2,stencil_size>(start,stop,*this,func);}
2185
2186 findNN = true;
2187 }
2188
	/*! \brief Unpack the sub-grid object
	 *
	 * \tparam prp properties to unpack
	 *
	 * \param mem preallocated memory from where to unpack the object
	 * \param sub_it sub-grid iterator defining where the unpacked data are placed
	 * \param ps unpack statistics
	 * \param context unused in this implementation
	 * \param opt unpacking options
	 *
	 */
2198 template<unsigned int ... prp, typename S2,typename context_type>
2199 void unpack(ExtPreAlloc<S2> & mem,
2200 grid_key_sparse_dx_iterator_sub<dims,chunking::size::value> & sub_it,
2201 Unpack_stat & ps,
2202 context_type & context,
2203 rem_copy_opt opt)
2204 {
2205 short unsigned int mask_it[chunking::size::value];
2206
2207 // first we unpack the number of chunks
2208
2209 size_t n_chunks;
2210
2211 Unpacker<size_t,S2>::unpack(mem,n_chunks,ps);
2212
2213 size_t sz[dim];
2214 for (size_t i = 0 ; i < dim ; i++)
2215 {Unpacker<size_t,S2>::unpack(mem,sz[i],ps);}
2216
2217 openfpm::vector<cheader<dim>> header_inf_tmp;
2218 openfpm::vector<mheader<chunking::size::value>> header_mask_tmp;
2219 openfpm::vector<aggregate_bfv<chunk_def>,S,layout_base > chunks_tmp;
2220
2221 header_inf_tmp.resize(n_chunks);
2222 header_mask_tmp.resize(n_chunks);
2223 chunks_tmp.resize(n_chunks);
2224
2225 for (size_t i = 0 ; i < n_chunks ; i++)
2226 {
2227 auto & hc = header_inf_tmp.get(i);
2228 auto & hm = header_mask_tmp.get(i);
2229
2230 Unpacker<typename std::remove_reference<decltype(header_mask.get(i).mask)>::type ,S2>::unpack(mem,hm.mask,ps);
2231 Unpacker<typename std::remove_reference<decltype(header_inf.get(i).pos)>::type ,S2>::unpack(mem,hc.pos,ps);
2232 Unpacker<typename std::remove_reference<decltype(header_inf.get(i).nele)>::type ,S2>::unpack(mem,hc.nele,ps);
2233
2234 // fill the mask_it
2235
2236 fill_mask(mask_it,hm.mask,hc.nele);
2237
2238 // now we unpack the information
2239 size_t active_cnk;
2240 size_t ele_id;
2241
2242 for (size_t k = 0 ; k < hc.nele ; k++)
2243 {
2244 // construct v1
2245 grid_key_dx<dim> v1;
2246 for (size_t i = 0 ; i < dim ; i++)
2247 {v1.set_d(i,hc.pos.get(i) + pos_chunk[mask_it[k]].get(i) + sub_it.getStart().get(i));}
2248
2249 pre_insert(v1,active_cnk,ele_id);
2250
2251 Unpacker<decltype(chunks.get_o(mask_it[k])),
2252 S2,
2253 PACKER_ENCAP_OBJECTS_CHUNKING>::template unpack<T,prp...>(mem,chunks.get_o(active_cnk),ele_id,ps);
2254
2255 }
2256 }
2257 }
2258
	/*! \brief Unpack the whole grid object
	 *
	 * \tparam prp properties to unpack
	 *
	 * \param mem preallocated memory from where to unpack the object
	 * \param ps unpack statistics
	 *
	 */
2268 template<unsigned int ... prp, typename S2>
2269 void unpack(ExtPreAlloc<S2> & mem,
2270 Unpack_stat & ps)
2271 {
2272 this->clear();
2273
2274 grid_key_dx<dim> start;
2275 grid_key_dx<dim> stop;
2276
2277 // We preunpack some data
2278 Unpack_stat ps_tmp = ps;
2279
2280 size_t unused;
2281 Unpacker<size_t,S2>::unpack(mem,unused,ps_tmp);
2282
2283 size_t sz[dim];
2284 for (size_t i = 0 ; i < dim ; i++)
2285 {Unpacker<size_t,S2>::unpack(mem,sz[i],ps_tmp);}
2286
2287 g_sm.setDimensions(sz);
2288 for (size_t i = 0 ; i < dim ; i++)
2289 {
2290 start.set_d(i,0);
2291 stop.set_d(i,getGrid().size(i)-1);
2292 }
2293
2294 set_g_shift_from_size(sz,g_sm_shift);
2295
2296 auto sub_it = this->getIterator(start,stop);
2297
2298 int ctx;
2299 unpack<prp...>(mem,sub_it,ps,ctx,rem_copy_opt::NONE_OPT);
2300 }
2301
	/*! \brief Unpack the sub-grid object applying an operation
	 *
	 * \tparam op operation applied on points that already exist (new points are simply written)
	 * \tparam prp properties to unpack
	 *
	 * \param mem preallocated memory from where to unpack the object
	 * \param sub2 sub-grid iterator defining where the unpacked data are placed
	 * \param ps unpack statistics
	 *
	 */
2312 template<template<typename,typename> class op, typename S2, unsigned int ... prp>
2313 void unpack_with_op(ExtPreAlloc<S2> & mem,
2314 grid_key_sparse_dx_iterator_sub<dim,chunking::size::value> & sub2,
2315 Unpack_stat & ps)
2316 {
2317 short unsigned int mask_it[chunking::size::value];
2318
2319 // first we unpack the number of chunks
2320
2321 size_t n_chunks;
2322
2323 Unpacker<size_t,S2>::unpack(mem,n_chunks,ps);
2324
2325 size_t sz[dim];
2326 for (size_t i = 0 ; i < dim ; i++)
2327 {Unpacker<size_t,S2>::unpack(mem,sz[i],ps);}
2328
2329 openfpm::vector<cheader<dim>> header_inf_tmp;
2330 openfpm::vector<mheader<chunking::size::value>> header_mask_tmp;
2331 openfpm::vector<aggregate_bfv<chunk_def>> chunks_tmp;
2332
2333 header_inf_tmp.resize(n_chunks);
2334 header_mask_tmp.resize(n_chunks);
2335 chunks_tmp.resize(n_chunks);
2336
2337 for (size_t i = 0 ; i < n_chunks ; i++)
2338 {
2339 auto & hc = header_inf_tmp.get(i);
2340 auto & hm = header_mask_tmp.get(i);
2341
2342 Unpacker<decltype(hm.mask),S2>::unpack(mem,hm.mask,ps);
2343 Unpacker<decltype(hc.pos),S2>::unpack(mem,hc.pos,ps);
2344 Unpacker<decltype(hc.nele),S2>::unpack(mem,hc.nele,ps);
2345
2346 // fill the mask_it
2347
2348 fill_mask(mask_it,hm.mask,hc.nele);
2349
2350 // now we unpack the information
2351 size_t active_cnk;
2352 size_t ele_id;
2353
2354 for (size_t k = 0 ; k < hc.nele ; k++)
2355 {
2356 // construct v1
2357 grid_key_dx<dim> v1;
2358 for (size_t i = 0 ; i < dim ; i++)
2359 {v1.set_d(i,hc.pos.get(i) + pos_chunk[mask_it[k]].get(i) + sub2.getStart().get(i));}
2360
2361 bool exist = pre_insert(v1,active_cnk,ele_id);
2362
2363 if (exist == false)
2364 {
2365 Unpacker<decltype(chunks.get_o(mask_it[k])),
2366 S2,
2367 PACKER_ENCAP_OBJECTS_CHUNKING>::template unpack_op<replace_,prp...>(mem,chunks.get_o(active_cnk),ele_id,ps);
2368 }
2369 else
2370 {
2371 Unpacker<decltype(chunks.get_o(mask_it[k])),
2372 S2,
2373 PACKER_ENCAP_OBJECTS_CHUNKING>::template unpack_op<op,prp...>(mem,chunks.get_o(active_cnk),ele_id,ps);
2374 }
2375
2376 }
2377 }
2378 }
2379
	/*! \brief Meta-function returning the type of sub-grid iterator this grid produces
	 *
	 * \return a default-constructed sub-grid iterator (used only to deduce the type)
	 *
	 */
2385 template <typename stencil = no_stencil>
2386 static grid_key_sparse_dx_iterator_sub<dim, chunking::size::value>
2387 type_of_subiterator()
2388 {
2389 return grid_key_sparse_dx_iterator_sub<dim, chunking::size::value>();
2390 }
2391
	/*! \brief Meta-function returning the type of iterator this grid produces
	 *
	 * \return a default-constructed grid iterator (used only to deduce the type)
	 *
	 */
2397 static grid_key_sparse_dx_iterator<dim, chunking::size::value>
2398 type_of_iterator()
2399 {
2400 return grid_key_sparse_dx_iterator<dim, chunking::size::value>();
2401 }
2402
	/*! \brief Convert a linearized sparse key into a grid_key_dx holding the global grid coordinates
	 *
	 * \param key_out output key
	 * \param key_in input key
	 *
	 */
2409 void convert_key(grid_key_dx<dim> & key_out, const grid_key_sparse_lin_dx & key_in) const
2410 {
2411 auto & ph = header_inf.get(key_in.getChunk()).pos;
2412 auto & pos_h = pos_chunk[key_in.getPos()];
2413
2414 for (size_t i = 0 ; i < dim ; i++)
2415 {
2416 key_out.set_d(i,ph.get(i) + pos_h.get(i));
2417 }
2418 }
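	/* Usage sketch (illustrative only): convert_key is typically used while iterating the inserted
	 * points with the sparse iterator, to recover the global coordinates of a point from its
	 * (chunk, in-chunk position) pair; "grid" is an assumption of this example.
	 *
	 *	auto it = grid.getIterator();
	 *	while (it.isNext())
	 *	{
	 *		auto lin_key = it.getKey();       // chunk id + position inside the chunk
	 *		grid_key_dx<3> gkey;
	 *		grid.convert_key(gkey,lin_key);   // gkey now holds the global grid coordinates
	 *		++it;
	 *	}
	 */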
2419
	/*! \brief Copy a sparse grid
	 *
	 * \param sg sparse grid to copy
	 *
	 * \return itself
	 *
	 */
2425 sgrid_cpu & operator=(const sgrid_cpu & sg)
2426 {
2427 cache_pnt = sg.cache_pnt;
2428
2429 for (size_t i = 0 ; i < SGRID_CACHE ; i++)
2430 {
2431 cache[i] = sg.cache[i];
2432 cached_id[i] = sg.cached_id[i];
2433 }
2434
		// map converting grid coordinates to chunk id
2436 map = sg.map;
2437 header_inf = sg.header_inf;
2438 header_mask = sg.header_mask;
2439 chunks = sg.chunks;
2440 g_sm = sg.g_sm;
2441 g_sm_shift = sg.g_sm_shift;
2442
2443 for (size_t i = 0 ; i < chunking::size::value ; i++)
2444 {
2445 pos_chunk[i] = sg.pos_chunk[i];
2446 }
2447
2448
2449 for (size_t i = 0 ; i < dim ; i++)
2450 {sz_cnk[i] = sg.sz_cnk[i];}
2451
2452 empty_v = sg.empty_v;
2453
2454 return *this;
2455 }
2456
	/*! \brief Reorder the chunks so that they are stored following their linearized position
	 *         in the chunk grid (improves memory locality of subsequent sweeps)
	 *
	 */
2461 void reorder()
2462 {
2463 openfpm::vector<cheader<dim>,S> header_inf_tmp;
2464 openfpm::vector<mheader<chunking::size::value>,S> header_mask_tmp;
2465 openfpm::vector<aggregate_bfv<chunk_def>,S,layout_base > chunks_tmp;
2466
2467 header_inf_tmp.resize(header_inf.size());
2468 header_mask_tmp.resize(header_mask.size());
2469 chunks_tmp.resize(chunks.size());
2470
2471 struct pair_int
2472 {
2473 int id;
2474 int pos;
2475
2476 bool operator<(const pair_int & tmp) const
2477 {
2478 return id < tmp.id;
2479 }
2480 };
2481
2482 openfpm::vector<pair_int> srt;
2483 srt.resize(header_inf.size());
2484
2485 for (int i = 0 ; i < header_inf.size() ; i++)
2486 {
2487 grid_key_dx<dim> kh = header_inf.get(i).pos;
2488 grid_key_dx<dim> kl;
2489
2490 // shift the key
2491 key_shift<dim,chunking>::shift(kh,kl);
2492
2493 long int lin_id = g_sm_shift.LinId(kh);
2494
2495 srt.get(i).id = lin_id;
2496 srt.get(i).pos = i;
2497 }
2498
2499 srt.sort();
2500
		// now reorder
2502
2503 for (int i = 0 ; i < srt.size() ; i++)
2504 {
2505 chunks_tmp.get(i) = chunks.get(srt.get(i).pos);
2506 header_inf_tmp.get(i) = header_inf.get(srt.get(i).pos);
2507 header_mask_tmp.get(i) = header_mask.get(srt.get(i).pos);
2508 }
2509
2510 chunks_tmp.swap(chunks);
2511 header_inf_tmp.swap(header_inf);
2512 header_mask_tmp.swap(header_mask);
2513
2514 clear_cache();
2515 reconstruct_map();
2516
2517 empty_v.clear();
2518 findNN = false;
2519 NNlist.clear();
2520 }
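	/* Usage note (illustrative only): reorder() is meant to be called after a phase of scattered
	 * insertions. It sorts the chunks by their linearized position in the chunk grid, so that
	 * subsequent sweeps (iterators, conv/conv_cross) touch the chunks in memory order; it also
	 * drops the point-lookup cache and the neighbour list, which are rebuilt lazily afterwards.
	 *
	 *	// ... bulk insertion phase ...
	 *	grid.reorder();
	 *	// ... iteration / convolution phase ...
	 */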
2521
	/*! \brief Move a sparse grid into this one
	 *
	 * \param sg sparse grid to move
	 *
	 * \return itself
	 *
	 */
2527 sgrid_cpu & operator=(sgrid_cpu && sg)
2528 {
2529 cache_pnt = sg.cache_pnt;
2530
2531 for (size_t i = 0 ; i < SGRID_CACHE ; i++)
2532 {
2533 cache[i] = sg.cache[i];
2534 cached_id[i] = sg.cached_id[i];
2535 }
2536
		// map converting grid coordinates to chunk id
2538 map.swap(sg.map);
2539 header_inf.swap(sg.header_inf);
2540 header_mask.swap(sg.header_mask);
2541 chunks.swap(sg.chunks);
2542 g_sm = sg.g_sm;
2543 g_sm_shift = sg.g_sm_shift;
2544
2545 for (size_t i = 0 ; i < chunking::size::value ; i++)
2546 {
2547 pos_chunk[i] = sg.pos_chunk[i];
2548 }
2549
2550
2551 for (size_t i = 0 ; i < dim ; i++)
2552 {sz_cnk[i] = sg.sz_cnk[i];}
2553
2554 empty_v = sg.empty_v;
2555
2556 return *this;
2557 }
2558
2559 /*! \brief Get the number of inserted points
2560 *
2561 * \return the number of inserted points
2562 *
2563 */
2564 size_t size_inserted()
2565 {
2566 return size();
2567 }
2568
2569 /*! \brief delete all the points
2570 *
2571 *
2572 */
2573 void clear()
2574 {
2575 header_inf.resize(1);
2576 header_mask.resize(1);
2577 chunks.resize(1);
2578
2579 clear_cache();
2580 reconstruct_map();
2581 }
2582
	/*! \brief Internal function to clear the cache
	 *
	 *
	 */
2588 void internal_clear_cache()
2589 {
2590 clear_cache();
2591 }
2592
2593#ifdef OPENFPM_DATA_ENABLE_IO_MODULE
2594
	/*! \brief Write the sparse grid into a VTK file
	 *
	 * \param output output filename
	 *
	 * \return true if the write succeeded
	 *
	 */
2600 template<typename Tw = float> bool write(const std::string & output)
2601 {
2602 file_type ft = file_type::BINARY;
2603
2604 openfpm::vector<Point<dim,Tw>> tmp_pos;
2605 openfpm::vector<T> tmp_prp;
2606
2607 // copy position and properties
2608
2609 auto it = getIterator();
2610
2611 while(it.isNext())
2612 {
2613 auto key = it.getKey();
2614 auto keyg = it.getKeyF();
2615
2616 Point<dim,Tw> p;
2617
2618 for (size_t i = 0 ; i < dim ; i++)
2619 {p.get(i) = keyg.get(i);}
2620
2621 tmp_pos.add(p);
2622
2623 tmp_prp.add();
2624 copy_prop_to_vector<decltype(chunks.get_o(key.getChunk())),decltype(tmp_prp.last())>
2625 cp(chunks.get_o(key.getChunk()),tmp_prp.last(),key.getPos());
2626
2627 boost::mpl::for_each_ref< boost::mpl::range_c<int,0,T::max_prop> >(cp);
2628
2629 ++it;
2630 }
2631
2632 // VTKWriter for a set of points
2633 VTKWriter<boost::mpl::pair<openfpm::vector<Point<dim,Tw>>, openfpm::vector<T>>, VECTOR_POINTS> vtk_writer;
2634 vtk_writer.add(tmp_pos,tmp_prp,tmp_pos.size());
2635
2636 openfpm::vector<std::string> prp_names;
2637
2638 // Write the VTK file
2639 return vtk_writer.write(output,prp_names,"sparse_grid",ft);
2640 }
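	/* Usage sketch (illustrative only, "grid" is assumed to be an already filled instance):
	 *
	 *	bool ok = grid.template write<float>("sparse_grid_out");   // dump points and properties as a VTK point cloud
	 */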
2641
2642#endif
2643
	// Functions to check whether the packed object is complex
2645 static bool pack()
2646 {
2647 return false;
2648 }
2649
2650 static bool packRequest()
2651 {
2652 return false;
2653 }
2654
2655 static bool packMem()
2656 {
2657 return false;
2658 }
2659
2660 /*! \brief return the header section of the blocks
2661 *
2662 * \return the header data section of the chunks stored
2663 *
2664 */
2665 openfpm::vector<cheader<dim>> & private_get_header_inf()
2666 {
2667 return header_inf;
2668 }
2669
2670 /*! \brief return the header section of the blocks
2671 *
2672 * \return the header data section of the chunks stored
2673 *
2674 */
2675 openfpm::vector<mheader<chunking::size::value>> & private_get_header_mask()
2676 {
2677 return header_mask;
2678 }
2679
2680 /*! \brief return the NN list for each block
2681 *
2682 * \return the NN list for each block
2683 *
2684 */
2685 openfpm::vector<int> & private_get_nnlist()
2686 {
2687 return NNlist;
2688 }
2689
2690 /*! \brief return the data of the blocks
2691 *
2692 * \return the data of the blocks
2693 *
2694 */
2695 openfpm::vector<aggregate_bfv<chunk_def>,S,layout_base > & private_get_data()
2696 {
2697 return chunks;
2698 }
2699
2700 /*! \brief return the header section of the blocks
2701 *
2702 * \return the header data section of the chunks stored
2703 *
2704 */
2705 const openfpm::vector<cheader<dim>> & private_get_header_inf() const
2706 {
2707 return header_inf;
2708 }
2709
2710 /*! \brief return the header section of the blocks
2711 *
2712 * \return the header data section of the chunks stored
2713 *
2714 */
2715 const openfpm::vector<mheader<chunking::size::value>> & private_get_header_mask() const
2716 {
2717 return header_mask;
2718 }
2719
2720 /*! \brief return the data of the blocks
2721 *
2722 * \return the data of the blocks
2723 *
2724 */
2725 const openfpm::vector<aggregate_bfv<chunk_def>> & private_get_data() const
2726 {
2727 return chunks;
2728 }
2729
	/*! \brief Check the consistency of the sparse grid (mask headers versus element counters)
	 *
	 *
	 */
2734 void consistency()
2735 {
2736 size_t tot = 0;
2737 for (int i = 1 ; i < header_mask.size() ; i++)
2738 {
2739 auto & m = header_mask.get(i);
2740
2741 size_t np_mask = 0;
2742
2743 for (int j = 0 ; j < chunking::size::value ; j++)
2744 {
2745 if (m.mask[j] & 0x1)
2746 {np_mask++;}
2747 }
2748
2749 if (header_inf.get(i).nele != np_mask)
2750 {
				std::cout << __FILE__ << ":" << __LINE__ << " error chunk: " << i << " has " << np_mask << " points but the header reports " << header_inf.get(i).nele << std::endl;
2752 }
2753 tot += np_mask;
2754 }
2755
2756 if (size() != tot)
2757 {
			std::cout << __FILE__ << ":" << __LINE__ << " Total number of points is inconsistent: " << size() << " " << tot << std::endl;
2759 }
2760 }
2761};
2762
2763template<unsigned int dim,
2764 typename T,
2765 typename S,
2766 typename grid_lin = grid_zm<dim,void>,
2767 typename layout = typename memory_traits_inte<T>::type,
2768 template<typename> class layout_base = memory_traits_inte,
2769 typename chunking = default_chunking<dim>>
2770using sgrid_soa = sgrid_cpu<dim,T,S,grid_lin,layout,layout_base,chunking>;
2771
2772
2773#endif /* OPENFPM_DATA_SRC_SPARSEGRID_SPARSEGRID_HPP_ */
2774