/*
 * SparseGrid_iterator_block.hpp
 *
 *  Created on: Feb 25, 2020
 *      Author: i-bird
 */

#ifndef SPARSEGRID_ITERATOR_BLOCK_HPP_
#define SPARSEGRID_ITERATOR_BLOCK_HPP_

#include "Grid/iterators/grid_skin_iterator.hpp"
#include "SparseGrid_chunk_copy.hpp"

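/*! \brief Map a compile-time integer to itself, or to 1 if it is negative
 *
 * For example fix_neg_to_one<3>::type is boost::mpl::int_<3>, while
 * fix_neg_to_one<-2>::type is boost::mpl::int_<1>.
 *
 * \tparam c compile-time integer to fix
 *
 */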
template<int c,bool is_neg = c < 0>
struct fix_neg_to_one
{
	typedef boost::mpl::int_<c> type;
};

template<int c>
struct fix_neg_to_one<c,true>
{
	typedef boost::mpl::int_<1> type;
};

/*! \brief This class is a functor for the "for_each" algorithm
 *
 * This class is a functor for the "for_each" algorithm. For each
 * element of the boost::mpl vector the operator() is called.
 * It converts a key from chunk coordinates into grid coordinates,
 * scaling each component by the corresponding block extent.
 *
 * \tparam dim dimensionality of the grid
 * \tparam vector_blocks_ext chunk/block extents (compile-time vector)
 *
 */
template<unsigned int dim, typename vector_blocks_ext>
struct calc_loc
{
	//! key to scale (chunk coordinates on input, grid coordinates on output)
	grid_key_dx<dim> & k;

	/*! \brief constructor
	 *
	 * \param k key to scale by the block extents
	 *
	 */
	calc_loc(grid_key_dx<dim> & k)
	:k(k)
	{};

	//! Scale the coordinate along dimension T::value by the block extent
	template<typename T>
	inline void operator()(T& val)
	{
		k.set_d(T::value,boost::mpl::at<typename vector_blocks_ext::type,boost::mpl::int_<T::value>>::type::value*k.get(T::value));
	}
};
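/* Usage sketch (illustrative only, not part of the public interface): convert a key
 * given in chunk coordinates into grid coordinates. The chunking descriptor below is
 * a hypothetical 8x8x8 example; any type exposing a nested ::type boost::mpl sequence
 * of block extents works.
 *
 * \code
 * struct chunk_8x8x8 { typedef boost::mpl::vector_c<int,8,8,8> type; };   // hypothetical
 *
 * grid_key_dx<3> k({2,3,4});                                   // key in chunk coordinates
 * calc_loc<3,chunk_8x8x8> cl(k);
 * boost::mpl::for_each_ref<boost::mpl::range_c<int,0,3>>(cl);
 * // k is now {16,24,32}: each component scaled by the block extent
 * \endcode
 */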


/*! \brief This class is a functor for the "for_each" algorithm
 *
 * This class is a functor for the "for_each" algorithm. For each
 * element of the boost::mpl vector the operator() is called.
 * It fills, dimension by dimension, the bounding box (in grid coordinates)
 * of the chunk indicated by chunk_id.
 *
 * \tparam dim dimensionality of the grid
 * \tparam header_type type of the chunk-header vector
 * \tparam vector_blocks_ext chunk/block extents (compile-time vector)
 *
 */
template<unsigned int dim, typename header_type, typename vector_blocks_ext>
struct fill_chunk_block
{
	//! chunk bounding box (in grid coordinates)
	Box<dim,size_t> cnk_box;

	//! chunk id
	unsigned int chunk_id;

	//! chunk headers
	header_type & header;

	/*! \brief constructor
	 *
	 * \param header chunk-header vector
	 * \param chunk_id id of the chunk whose bounding box has to be filled
	 *
	 */
	inline fill_chunk_block(header_type & header, unsigned int chunk_id)
	:chunk_id(chunk_id),header(header)
	{};

	//! Fill the low/high bound of the chunk box along dimension T::value
	template<typename T>
	inline void operator()(T& val)
	{
		cnk_box.setLow(T::value,header.get(chunk_id).pos.get(T::value));
		cnk_box.setHigh(T::value,header.get(chunk_id).pos.get(T::value) + boost::mpl::at<typename vector_blocks_ext::type,T>::type::value - 1);
	}
};
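/* Usage sketch (illustrative, mirroring what SelectValid() below does): compute the
 * bounding box of chunk `chunk_id` in grid coordinates. `header` is assumed to be the
 * chunk-header vector returned by private_get_header_inf().
 *
 * \code
 * fill_chunk_block<dim,decltype(header),vector_blocks_exts> fcb(header,chunk_id);
 * boost::mpl::for_each_ref<boost::mpl::range_c<int,0,dim>>(fcb);
 * // fcb.cnk_box now spans [pos, pos + block_extent - 1] in every dimension
 * \endcode
 */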

template<unsigned int prop, unsigned int stencil_size, unsigned int dim, typename vector_blocks_exts, typename vector_ext>
struct loadBlock_impl
{
	template<unsigned int N1, typename T, typename SparseGridType>
	static void loadBlock(T arr[N1], SparseGridType & sgt, int chunk_id, unsigned char mask[N1])
	{
		get_block_sizes<dim,stencil_size,vector_blocks_exts,vector_ext> gbs;

		boost::mpl::for_each_ref< boost::mpl::range_c<int,0,dim> >(gbs);

		grid_sm<dim,void> g_block(gbs.sz_ext);
		grid_sm<dim,void> g_in_block(gbs.sz_block);
		grid_sm<dim,void> g_block_arr(gbs.sz_tot);

		grid_key_dx_iterator<dim> it_in_block(g_in_block);

		auto & data = sgt.private_get_data();
		auto & header_mask = sgt.private_get_header_mask();

		auto & h = header_mask.get(chunk_id);

		auto & ref_block = data.template get<prop>(chunk_id);

		while(it_in_block.isNext())
		{
			auto p = it_in_block.get();

			grid_key_dx<dim> arr_p;

			for (int i = 0 ; i < dim ; i++)
			{arr_p.set_d(i,p.get(i)+stencil_size);}

			size_t id = g_block_arr.LinId(arr_p);
			size_t idi = g_in_block.LinId(p);

			arr[id] = ref_block[idi];
			mask[id] = exist_sub(h,idi);

			++it_in_block;
		}
	}

	template<unsigned int N1, typename T, typename SparseGridType>
	static void loadBlock(T arr[N1], SparseGridType & sgt, int chunk_id)
	{
		get_block_sizes<dim,stencil_size,vector_blocks_exts,vector_ext> gbs;

		boost::mpl::for_each_ref< boost::mpl::range_c<int,0,dim> >(gbs);

		grid_sm<dim,void> g_block(gbs.sz_ext);
		grid_sm<dim,void> g_in_block(gbs.sz_block);
		grid_sm<dim,void> g_block_arr(gbs.sz_tot);

		grid_key_dx_iterator<dim> it_in_block(g_in_block);

		auto & data = sgt.private_get_data();
		auto & header_mask = sgt.private_get_header_mask();

		auto & ref_block = data.template get<prop>(chunk_id);

		while(it_in_block.isNext())
		{
			auto p = it_in_block.get();

			grid_key_dx<dim> arr_p;

			for (int i = 0 ; i < dim ; i++)
			{arr_p.set_d(i,p.get(i)+stencil_size);}

			arr[g_block_arr.LinId(arr_p)] = ref_block[g_in_block.LinId(p)];

			++it_in_block;
		}
	}

	template<unsigned int N1, typename T, typename SparseGridType>
	static void storeBlock(T arr[N1], SparseGridType & sgt, int chunk_id)
	{
		get_block_sizes<dim,stencil_size,vector_blocks_exts,vector_ext> gbs;

		boost::mpl::for_each_ref< boost::mpl::range_c<int,0,dim> >(gbs);

		grid_sm<dim,void> g_block(gbs.sz_ext);
		grid_sm<dim,void> g_in_block(gbs.sz_block);
		grid_sm<dim,void> g_block_arr(gbs.sz_tot);

		grid_key_dx_iterator<dim> it_in_block(g_in_block);

		auto & data = sgt.private_get_data();
		auto & header_mask = sgt.private_get_header_mask();

		auto & ref_block = data.template get<prop>(chunk_id);

		while(it_in_block.isNext())
		{
			auto p = it_in_block.get();

			ref_block[g_in_block.LinId(p)] = arr[g_in_block.LinId(p)];

			++it_in_block;
		}
	}


	/*! \brief Load the border (ghost layer) of the block from the neighboring chunks
	 *
	 * \param arr bordered block to fill
	 * \param sgt sparse grid
	 * \param chunk_id chunk from which the block has been loaded
	 * \param bord linearized positions (in the bordered block) that form the border
	 * \param block_skin keys of the neighboring blocks forming the skin
	 * \param chunk_ids for each border element, the index (into maps_blk) of the chunk it comes from
	 * \param offsets for each border element, the linearized offset inside the source chunk
	 * \param mask existence mask to fill
	 * \param maps_blk temporary buffer mapping skin blocks to chunk ids
	 *
	 */
	template<unsigned int N1, typename T, typename SparseGridType>
	static void loadBorder(T arr[N1],
						SparseGridType & sgt,
						size_t chunk_id,
						openfpm::vector<unsigned int> & bord,
						openfpm::vector<grid_key_dx<SparseGridType::dims>> & block_skin,
						openfpm::vector<unsigned int> & chunk_ids,
						openfpm::vector<short int> & offsets,
						unsigned char mask[N1],
						openfpm::vector<unsigned int> & maps_blk)
	{
		typedef typename generate_array_vector<size_t,typename vector_blocks_exts::type>::result size;

		auto & data = sgt.private_get_data();
		auto & header_mask = sgt.private_get_header_mask();
		auto & header_inf = sgt.private_get_header_inf();

		auto & hm = header_mask.get(chunk_id);
		auto & hc = header_inf.get(chunk_id);

		maps_blk.resize(block_skin.size());

		for (int i = 0 ; i < maps_blk.size() ; i++)
		{
			grid_key_dx<dim> p;

			for (int j = 0 ; j < dim ; j++)
			{p.set_d(j,block_skin.get(i).get(j) + hc.pos.get(j) / size::data[j] - 1);}

			maps_blk.get(i) = sgt.getChunk(p);
		}

		for (int i = 0 ; i < bord.size(); i++)
		{
			size_t ac = maps_blk.get(chunk_ids.get(i));

			size_t b = bord.get(i);
			size_t off = offsets.template get<0>(i);

			auto & h = header_mask.get(ac);

			arr[b] = (ac == data.size()-1)?data.template get<prop>(0)[off]:data.template get<prop>(ac)[off];
			mask[b] = (ac == data.size()-1)?0:exist_sub(h,off);
		}
	}
};
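/* Layout note (illustrative): loadBlock()/loadBorder() fill `arr` and `mask` using the
 * border-extended block layout, whose extent along each dimension is the block extent
 * plus 2*stencil_size. mask[i] is non-zero only where the corresponding sparse-grid
 * point actually exists, so stencil codes can skip non-existing points.
 */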

/*! \brief Optimized 3D implementation
 *
 */
template<unsigned int prop, unsigned int stencil_size, typename vector_blocks_exts, typename vector_ext>
struct loadBlock_impl<prop,stencil_size,3,vector_blocks_exts,vector_ext>
{
	template<unsigned int N1, typename T, typename SparseGridType>
	inline static void loadBlock(T arr[N1], SparseGridType & sgt, int chunk_id, unsigned char mask[N1])
	{
		auto & data = sgt.private_get_data();
		auto & header_mask = sgt.private_get_header_mask();

		auto & h = header_mask.get(chunk_id);

		// Faster version

		auto & chunk = data.template get<prop>(chunk_id);

		copy_xyz<is_layout_inte<typename SparseGridType::memory_traits >::type::value,prop,stencil_size,typename vector_blocks_exts::type,false>::template copy<N1>(arr,mask,h,chunk);
	}


	template<unsigned int N1, typename T, typename SparseGridType>
	inline static void storeBlock(T arr[N1], SparseGridType & sgt, int chunk_id)
	{
		auto & data = sgt.private_get_data();
		auto & header_mask = sgt.private_get_header_mask();

		// Faster version

		auto & chunk = data.template get<prop>(chunk_id);

		copy_xyz<is_layout_inte<typename SparseGridType::memory_traits >::type::value,prop,stencil_size,typename vector_blocks_exts::type,false>::template store<N1>(arr,chunk);
	}



	/*! \brief Load the border (ghost layer) of the block from the six neighboring chunks
	 *
	 * The neighboring chunks are looked up (or read back from the cached NN list when
	 * findNN is true) and their boundary faces are copied into the border of arr.
	 *
	 */
	template<bool findNN, typename NNType, unsigned int N1, typename T, typename SparseGridType>
	inline static void loadBorder(T arr[N1],
						SparseGridType & sgt,
						size_t chunk_id,
						openfpm::vector<unsigned int> & bord,
						openfpm::vector<grid_key_dx<SparseGridType::dims>> & block_skin,
						openfpm::vector<unsigned int> & chunk_ids,
						openfpm::vector<short int> & offsets,
						unsigned char mask[N1],
						openfpm::vector<unsigned int> & maps_blk)
	{
		typedef typename generate_array_vector<size_t,typename vector_blocks_exts::type>::result size;

		auto & data = sgt.private_get_data();
		auto & header_mask = sgt.private_get_header_mask();
		auto & NNlist = sgt.private_get_nnlist();

		auto & h = header_mask.get(chunk_id);

		typedef typename boost::mpl::at<typename vector_blocks_exts::type,boost::mpl::int_<0>>::type sz0;
		typedef typename boost::mpl::at<typename vector_blocks_exts::type,boost::mpl::int_<1>>::type sz1;
		typedef typename boost::mpl::at<typename vector_blocks_exts::type,boost::mpl::int_<2>>::type sz2;

		grid_key_dx<3> p;

		bool exist;
		long int r;

		// z+ neighbor
		if (findNN == false)
		{
			p = sgt.getChunkPos(chunk_id) + grid_key_dx<3>({0,0,1});
			r = sgt.getChunk(p,exist);
			NNlist.template get<0>(chunk_id*NNType::nNN) = (exist)?r:-1;
		}
		else
		{
			r = NNlist.template get<0>(chunk_id*NNType::nNN);
			exist = (r != -1);
		}
		if (exist == true)
		{
			auto & h = header_mask.get(r);
			copy_xy_3<is_layout_inte<typename SparseGridType::memory_traits >::type::value,prop,stencil_size,typename vector_blocks_exts::type,NNType::is_cross>::template copy<0,stencil_size+sz2::value,N1>(arr,mask,h,data.get(r));
		}
		else
		{
			copy_xy_3<is_layout_inte<typename SparseGridType::memory_traits >::type::value,prop,stencil_size,typename vector_blocks_exts::type,NNType::is_cross>::template mask_null<stencil_size+sz2::value,N1>(mask);
		}

		// z- neighbor
		if (findNN == false)
		{
			p = sgt.getChunkPos(chunk_id) + grid_key_dx<3>({0,0,-1});
			r = sgt.getChunk(p,exist);
			NNlist.template get<0>(chunk_id*NNType::nNN+1) = (exist)?r:-1;
		}
		else
		{
			r = NNlist.template get<0>(chunk_id*NNType::nNN+1);
			exist = (r != -1);
		}
		if (exist == true)
		{
			auto & h = header_mask.get(r);
			copy_xy_3<is_layout_inte<typename SparseGridType::memory_traits >::type::value,prop,stencil_size,typename vector_blocks_exts::type,NNType::is_cross>::template copy<sz2::value - stencil_size,0,N1>(arr,mask,h,data.get(r));
		}
		else
		{
			copy_xy_3<is_layout_inte<typename SparseGridType::memory_traits >::type::value,prop,stencil_size,typename vector_blocks_exts::type,NNType::is_cross>::template mask_null<0,N1>(mask);
		}

		// y+ neighbor
		if (findNN == false)
		{
			p = sgt.getChunkPos(chunk_id) + grid_key_dx<3>({0,1,0});
			r = sgt.getChunk(p,exist);
			NNlist.template get<0>(chunk_id*NNType::nNN+2) = (exist)?r:-1;
		}
		else
		{
			r = NNlist.template get<0>(chunk_id*NNType::nNN+2);
			exist = (r != -1);
		}
		if (exist == true)
		{
			auto & h = header_mask.get(r);
			copy_xz_3<is_layout_inte<typename SparseGridType::memory_traits >::type::value,prop,stencil_size,typename vector_blocks_exts::type,NNType::is_cross>::template copy<0,stencil_size+sz1::value,N1>(arr,mask,h,data.get(r));
		}
		else
		{
			copy_xz_3<is_layout_inte<typename SparseGridType::memory_traits >::type::value,prop,stencil_size,typename vector_blocks_exts::type,NNType::is_cross>::template mask_null<stencil_size+sz1::value,N1>(mask);
		}

		// y- neighbor
		if (findNN == false)
		{
			p = sgt.getChunkPos(chunk_id) + grid_key_dx<3>({0,-1,0});
			r = sgt.getChunk(p,exist);
			NNlist.template get<0>(chunk_id*NNType::nNN+3) = (exist)?r:-1;
		}
		else
		{
			r = NNlist.template get<0>(chunk_id*NNType::nNN+3);
			exist = (r != -1);
		}
		if (exist == true)
		{
			auto & h = header_mask.get(r);
			copy_xz_3<is_layout_inte<typename SparseGridType::memory_traits >::type::value,prop,stencil_size,typename vector_blocks_exts::type,NNType::is_cross>::template copy<sz1::value-stencil_size,0,N1>(arr,mask,h,data.get(r));
		}
		else
		{
			copy_xz_3<is_layout_inte<typename SparseGridType::memory_traits >::type::value,prop,stencil_size,typename vector_blocks_exts::type,NNType::is_cross>::template mask_null<0,N1>(mask);
		}

		// x+ neighbor
		if (findNN == false)
		{
			p = sgt.getChunkPos(chunk_id) + grid_key_dx<3>({1,0,0});
			r = sgt.getChunk(p,exist);
			NNlist.template get<0>(chunk_id*NNType::nNN+4) = (exist)?r:-1;
		}
		else
		{
			r = NNlist.template get<0>(chunk_id*NNType::nNN+4);
			exist = (r != -1);
		}
		if (exist == true)
		{
			auto & h = header_mask.get(r);
			copy_yz_3<is_layout_inte<typename SparseGridType::memory_traits >::type::value,prop,stencil_size,typename vector_blocks_exts::type,NNType::is_cross>::template copy<0,sz0::value+stencil_size,N1>(arr,mask,h,data.get(r));
		}
		else
		{
			copy_yz_3<is_layout_inte<typename SparseGridType::memory_traits >::type::value,prop,stencil_size,typename vector_blocks_exts::type,NNType::is_cross>::template mask_null<sz0::value+stencil_size,N1>(mask);
		}

		// x- neighbor
		if (findNN == false)
		{
			p = sgt.getChunkPos(chunk_id) + grid_key_dx<3>({-1,0,0});
			r = sgt.getChunk(p,exist);
			NNlist.template get<0>(chunk_id*NNType::nNN+5) = (exist)?r:-1;
		}
		else
		{
			r = NNlist.template get<0>(chunk_id*NNType::nNN+5);
			exist = (r != -1);
		}
		if (exist == true)
		{
			auto & h = header_mask.get(r);
			copy_yz_3<is_layout_inte<typename SparseGridType::memory_traits >::type::value,prop,stencil_size,typename vector_blocks_exts::type,NNType::is_cross>::template copy<sz0::value-stencil_size,0,N1>(arr,mask,h,data.get(r));
		}
		else
		{
			copy_yz_3<is_layout_inte<typename SparseGridType::memory_traits >::type::value,prop,stencil_size,typename vector_blocks_exts::type,NNType::is_cross>::template mask_null<0,N1>(mask);
		}
	}
};
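/* NN-list layout note (derived from loadBorder above): for each chunk the six face
 * neighbors are cached in private_get_nnlist() at positions chunk_id*NNType::nNN + 0..5,
 * in the order z+, z-, y+, y-, x+, x-, with -1 marking a missing neighbor. The first
 * sweep (findNN == false) fills the cache; later sweeps (findNN == true) only read it.
 */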


/*! \brief Grid key sparse iterator on a sub-part of the domain
 *
 * It iterates chunk by chunk (block by block) over the chunks that intersect
 * the box [start,stop], exposing block-wise load/store of the properties.
 *
 */
template<unsigned int dim,
		 unsigned int stencil_size,
		 typename SparseGridType,
		 typename vector_blocks_exts,
		 typename vector_ext = typename vmpl_create_constant<dim,1>::type>
class grid_key_sparse_dx_iterator_block_sub
{
	//! SparseGrid
	SparseGridType & spg;

	//! point to the actual chunk
	size_t chunk_id;

	//! Starting point
	grid_key_dx<dim> start_;

	//! Stop point
	grid_key_dx<dim> stop_;

	//! Sub-grid box
	Box<dim,size_t> bx;

	//! border
	openfpm::vector<unsigned int> bord;

	//! chunk ids
	openfpm::vector<unsigned int> chunk_shifts;

	//! offsets
	openfpm::vector<short int> offsets;

	//! blocks skin
	openfpm::vector<grid_key_dx<dim>> block_skin;

	//! chunk header (mask) of the loaded chunk
	mheader<SparseGridType::chunking_type::size::value> * hm;

	//! chunk header (info) of the loaded chunk
	cheader<dim> * hc;

	//! temporary buffer for loadBorder
	openfpm::vector<unsigned int> maps_blk;

	//! iteration block
	Box<dim,size_t> block_it;

	/*! \brief Every time we move to a new chunk we calculate on which indices we have to iterate
	 *
	 *
	 */
	void SelectValid()
	{
		auto & header = spg.private_get_header_inf();
		auto & header_mask = spg.private_get_header_mask();

		while (chunk_id < header.size())
		{
			auto & mask = header_mask.get(chunk_id).mask;

			fill_chunk_block<dim,decltype(header),vector_blocks_exts> fcb(header,chunk_id);

			boost::mpl::for_each_ref<boost::mpl::range_c<int,0,dim>>(fcb);

			if (bx.Intersect(fcb.cnk_box,block_it) == true)
			{
				block_it -= header.get(chunk_id).pos.toPoint();
				break;
			}
			else
			{chunk_id += 1;}
		}
	}

public:

	//! block extents plus the stencil border on both sides (compile-time vector)
	typedef typename vmpl_sum_constant<2*stencil_size,typename vector_blocks_exts::type>::type stop_border_vmpl;
	//! border offset (stencil_size) along each dimension (compile-time vector)
	typedef typename vmpl_create_constant<dim,stencil_size>::type start_border_vmpl;

	typedef typename generate_array_vector<size_t,typename vector_blocks_exts::type>::result size;
	typedef typename generate_array_vector<size_t,start_border_vmpl>::result start_b_;
	typedef typename generate_array_vector<size_t,stop_border_vmpl>::result stop_b_;

	typedef vector_blocks_exts vector_blocks_exts_type;
	typedef vector_ext vector_ext_type;

	//! number of elements in one block (no border)
	static const int sizeBlock = vector_blocks_exts::size::value;
	//! number of elements in one border-extended block
	static const int sizeBlockBord = vmpl_reduce_prod<stop_border_vmpl>::type::value;
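	/* Example (illustrative): with a hypothetical 8x8x8 chunking and stencil_size = 1,
	 * sizeBlock = 8*8*8 = 512 and sizeBlockBord = (8+2)*(8+2)*(8+2) = 1000. These are the
	 * array sizes expected by loadBlock()/storeBlock() and loadBlockBorder() respectively.
	 */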

	/*! \brief Default constructor
	 *
	 * \warning extremely unsafe
	 * If you use this constructor you must call reinitialize() before using the iterator
	 *
	 */
	grid_key_sparse_dx_iterator_block_sub() {};

	grid_key_sparse_dx_iterator_block_sub(SparseGridType & spg,
										  const grid_key_dx<dim> & start,
										  const grid_key_dx<dim> & stop)
	:spg(spg),chunk_id(1),
	 start_(start),stop_(stop)
	{
		// Create the border coefficients
		get_block_sizes<dim,stencil_size,vector_blocks_exts,vector_ext> gbs;

		boost::mpl::for_each_ref< boost::mpl::range_c<int,0,dim> >(gbs);

		Box<dim,int> skinb;
		Box<dim,int> skinbb;

		size_t bc[dim];
		for (int i = 0 ; i < dim ; i++)
		{
			skinb.setLow(i,0);
			skinb.setHigh(i,gbs.sz_tot[i]-1);
			skinbb.setLow(i,0);
			skinbb.setHigh(i,gbs.sz_ext_b[i]-1);
			bc[i] = NON_PERIODIC;
		}

		grid_sm<dim,void> g_smb(gbs.sz_ext_b);

		// Create the block skin index

		openfpm::vector<unsigned int> b_map;
		grid_skin_iterator_bc<dim> gsi_b(g_smb,skinbb,skinbb,bc);

		b_map.resize(g_smb.size());

		while (gsi_b.isNext())
		{
			auto p = gsi_b.get();

			block_skin.add(p);

			b_map.get(g_smb.LinId(p)) = block_skin.size() - 1;

			++gsi_b;
		}

		grid_sm<dim,void> g_sm(gbs.sz_tot);
		grid_skin_iterator_bc<dim> gsi(g_sm,skinb,skinb,bc);

		while (gsi.isNext())
		{
			auto p = gsi.get();

			grid_key_dx<dim> sh;

			bord.add(g_sm.LinId(p));

			short offset = 0;
			int stride = 1;
			for (int i = 0 ; i < dim ; i++)
			{
				if (p.get(i) < stencil_size)
				{offset += (gbs.sz_block[i]-1)*stride;}
				else if (p.get(i) >= gbs.sz_tot[i] - stencil_size)
				{offset += 0;}
				else
				{offset += (p.get(i)-stencil_size)*stride;}

				sh.set_d(i,(p.get(i) + (gbs.sz_block[i] - stencil_size)) / gbs.sz_block[i]);
				stride *= gbs.sz_block[i];
			}

			offsets.add(offset);

			size_t bid = g_smb.LinId(sh);
			chunk_shifts.add(b_map.get(bid));

			++gsi;
		}

		for (size_t i = 0 ; i < dim ; i++)
		{
			bx.setLow(i,start.get(i));
			bx.setHigh(i,stop.get(i));
		}

		SelectValid();
	}

	/*! \brief Reinitialize the iterator
	 *
	 * It re-initializes the iterator with the passed grid_key_sparse_dx_iterator_sub;
	 * the actual position of the passed iterator is ignored
	 *
	 * \param g_s_it grid_key_sparse_dx_iterator_sub
	 *
	 */
	inline void reinitialize(const grid_key_sparse_dx_iterator_sub<dim,vector_blocks_exts::size::value> & g_s_it)
	{
		spg = g_s_it.spg;
		chunk_id = g_s_it.chunk_id;
		start_ = g_s_it.start_;
		stop_ = g_s_it.stop_;
		bx = g_s_it.bx;
	}

	inline grid_key_sparse_dx_iterator_block_sub<dim,stencil_size,SparseGridType,vector_blocks_exts,vector_ext> & operator++()
	{
		auto & header = spg.private_get_header_inf();

		chunk_id++;

		if (chunk_id < header.size())
		{
			SelectValid();
		}

		return *this;
	}

	/*! \brief Return true if there is a next grid point
	 *
	 * \return true if there is the next grid point
	 *
	 */
	bool isNext()
	{
		auto & header = spg.private_get_header_inf();

		return chunk_id < header.size();
	}

	/*! \brief Return the starting point for the iteration
	 *
	 * \return the starting point
	 *
	 */
	const grid_key_dx<dim> & getStart() const
	{
		return start_;
	}

	/*! \brief Return the stop point for the iteration
	 *
	 * \return the stop point
	 *
	 */
	const grid_key_dx<dim> & getStop() const
	{
		return stop_;
	}

	template<unsigned int prop, typename T>
	void loadBlock(T arr[sizeBlock])
	{
		auto & header_mask = spg.private_get_header_mask();
		auto & header_inf = spg.private_get_header_inf();

		loadBlock_impl<prop,stencil_size,dim,vector_blocks_exts,vector_ext>::template loadBlock<sizeBlock>(arr,spg,chunk_id);

		hm = &header_mask.get(chunk_id);
		hc = &header_inf.get(chunk_id);
	}

	template<unsigned int prop,typename T>
	void loadBlock(T arr[sizeBlock], unsigned char mask[sizeBlock])
	{
		auto & header_mask = spg.private_get_header_mask();
		auto & header_inf = spg.private_get_header_inf();

		loadBlock_impl<prop,stencil_size,dim,vector_blocks_exts,vector_ext>::template loadBlock<sizeBlock>(arr,spg,chunk_id,mask);

		hm = &header_mask.get(chunk_id);
		hc = &header_inf.get(chunk_id);
	}

	template<unsigned int prop,typename T>
	void storeBlock(T arr[sizeBlock])
	{
		auto & header_mask = spg.private_get_header_mask();
		auto & header_inf = spg.private_get_header_inf();

		loadBlock_impl<prop,stencil_size,dim,vector_blocks_exts,vector_ext>::template storeBlock<sizeBlock>(arr,spg,chunk_id);

		hm = &header_mask.get(chunk_id);
		hc = &header_inf.get(chunk_id);
	}


	template<unsigned int prop, typename NNtype, bool findNN, typename T>
	void loadBlockBorder(T arr[sizeBlockBord],unsigned char mask[sizeBlockBord])
	{
		auto & header_mask = spg.private_get_header_mask();
		auto & header_inf = spg.private_get_header_inf();

		loadBlock_impl<prop,stencil_size,dim,vector_blocks_exts,vector_ext>::template loadBlock<sizeBlockBord>(arr,spg,chunk_id,mask);
		loadBlock_impl<prop,stencil_size,dim,vector_blocks_exts,vector_ext>::template loadBorder<findNN,NNtype,sizeBlockBord>(arr,spg,chunk_id,bord,block_skin,chunk_shifts,offsets,mask,maps_blk);

		hm = &header_mask.get(chunk_id);
		hc = &header_inf.get(chunk_id);
	}
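	/* Typical usage (illustrative sketch, not a verbatim excerpt). It assumes a 3-D sparse
	 * grid `grid` whose getBlockIterator<stencil_size>(start,stop) (or an equivalent factory)
	 * returns this iterator type, properties 0 and 1 of type double, and a neighborhood
	 * descriptor NN_type exposing nNN and is_cross as required by loadBorder().
	 *
	 * \code
	 * auto it = grid.template getBlockIterator<1>(start,stop);
	 *
	 * unsigned char mask[decltype(it)::sizeBlockBord];
	 * double blk[decltype(it)::sizeBlockBord];           // bordered input block
	 * double out[decltype(it)::sizeBlock];               // output block (no border)
	 *
	 * while (it.isNext())
	 * {
	 *     // load property 0 of the current chunk plus one layer of ghost values
	 *     it.template loadBlockBorder<0,NN_type,false>(blk,mask);
	 *
	 *     for (int k = it.start_b(2) ; k < it.stop_b(2) ; k++)
	 *     {
	 *         for (int j = it.start_b(1) ; j < it.stop_b(1) ; j++)
	 *         {
	 *             for (int i = it.start_b(0) ; i < it.stop_b(0) ; i++)
	 *             {
	 *                 // skip points that do not exist in the sparse grid
	 *                 if (mask[it.LinB(i,j,k)] == 0)	{continue;}
	 *
	 *                 // 6-point Laplacian read from the bordered block
	 *                 out[it.LinB_off(i,j,k)] = blk[it.LinB(i+1,j,k)] + blk[it.LinB(i-1,j,k)] +
	 *                                           blk[it.LinB(i,j+1,k)] + blk[it.LinB(i,j-1,k)] +
	 *                                           blk[it.LinB(i,j,k+1)] + blk[it.LinB(i,j,k-1)] -
	 *                                           6.0*blk[it.LinB(i,j,k)];
	 *             }
	 *         }
	 *     }
	 *
	 *     // write the result into property 1 of the same chunk
	 *     it.template storeBlock<1>(out);
	 *
	 *     ++it;
	 * }
	 * \endcode
	 */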


	/*! \brief starting point of the computation block (border-extended coordinates)
	 *
	 * \param i coordinate
	 *
	 */
	constexpr int start_b(int i) const
	{
		return block_it.getLow(i) + stencil_size;
	}

	/*! \brief stopping point of the computation block (border-extended coordinates)
	 *
	 * \param i coordinate
	 *
	 */
	constexpr int stop_b(int i) const
	{
		return block_it.getHigh(i) + 1 + stencil_size;
	}

	/*! \brief starting point of the computation block (block coordinates, no border)
	 *
	 * \param i coordinate
	 *
	 */
	constexpr int start(int i) const
	{
		return block_it.getLow(i);
	}

	/*! \brief stopping point of the computation block (block coordinates, no border)
	 *
	 * \param i coordinate
	 *
	 */
	constexpr int stop(int i) const
	{
		return block_it.getHigh(i) + 1;
	}

	/*! \brief Linearize an arbitrary set of indices
	 *
	 * The indices are linearized using the block (no border) strides
	 *
	 */
	template<typename a, typename ...lT>
	__device__ __host__ inline size_t Lin(a v,lT...t) const
	{
#ifdef SE_CLASS1
		if (sizeof...(t)+1 > dim)
		{
			std::cerr << "Error: cannot linearize more indices than the grid dimensionality" << "\n";
		}
#endif

		return v*vmpl_reduce_prod_stop<typename vector_blocks_exts::type,(int)dim - (int)sizeof...(t) - 2>::type::value + Lin(t...);
	}

	//! Linearize a set of indices
	template<typename a> __device__ __host__ inline size_t Lin(a v) const
	{
		return v*vmpl_reduce_prod_stop<typename vector_blocks_exts::type,(int)dim - 2>::type::value;
	}

	/*! \brief Linearize an arbitrary set of indices
	 *
	 * The indices are linearized using the border-extended block strides
	 *
	 */
	template<typename a, typename ...lT>
	__device__ __host__ inline size_t LinB(a v,lT...t) const
	{
#ifdef SE_CLASS1
		if (sizeof...(t)+1 > dim)
		{
			std::cerr << "Error: cannot linearize more indices than the grid dimensionality" << "\n";
		}
#endif

		return v*vmpl_reduce_prod_stop<stop_border_vmpl,(int)dim - (int)sizeof...(t) - 2>::type::value + LinB(t...);
	}

	//! Linearize a set of indices
	template<typename a> __device__ __host__ inline size_t LinB(a v) const
	{
		return v*vmpl_reduce_prod_stop<stop_border_vmpl,(int)dim - 2>::type::value;
	}

	/*! \brief Linearize an arbitrary set of indices
	 *
	 * Like LinB, but the indices are shifted back by stencil_size and linearized
	 * with the block (no border) strides
	 *
	 */
	template<typename a, typename ...lT>
	__device__ __host__ inline size_t LinB_off(a v,lT...t) const
	{
#ifdef SE_CLASS1
		if (sizeof...(t)+1 > dim)
		{
			std::cerr << "Error: cannot linearize more indices than the grid dimensionality" << "\n";
		}
#endif

		return (v-stencil_size)*vmpl_reduce_prod_stop<typename vector_blocks_exts::type,(int)dim - (int)sizeof...(t) - 2>::type::value + LinB_off(t...);
	}

	//! Linearize a set of indices
	template<typename a> __device__ __host__ inline size_t LinB_off(a v) const
	{
		return (v-stencil_size)*(vmpl_reduce_prod_stop<typename vector_blocks_exts::type,(int)dim - 2>::type::value);
	}

	/*! \brief Check if the point exists
	 *
	 * \param args indices of the point (in border-extended coordinates)
	 *
	 * \return true if the point exists in the sparse grid
	 *
	 */
	template<typename ... ArgsType>
	bool exist(ArgsType ... args)
	{
		size_t l = LinB_off(args ...);

		return spg.exist_sub(*hm,l);
	}

	/*! \brief Return the chunk id
	 *
	 * \return the chunk id
	 *
	 */
	int getChunkId()
	{
		return chunk_id;
	}
};


#endif /* SPARSEGRID_ITERATOR_BLOCK_HPP_ */