| 1 | /* |
| 2 | * grid_amr_dist.hpp |
| 3 | * |
| 4 | * Created on: Sep 21, 2017 |
| 5 | * Author: i-bird |
| 6 | */ |
| 7 | |
| 8 | #ifndef AMR_GRID_AMR_DIST_HPP_ |
| 9 | #define AMR_GRID_AMR_DIST_HPP_ |
| 10 | |
| 11 | #define OPENFPM_DATA_ENABLE_IO_MODULE |
| 12 | |
| 13 | #include "Grid/grid_dist_id.hpp" |
| 14 | #include "Amr/grid_dist_amr_key_iterator.hpp" |
| 15 | |
| 16 | #ifdef __NVCC__ |
| 17 | #include "SparseGridGpu/SparseGridGpu.hpp" |
| 18 | #endif |
| 19 | |
| 20 | #define AMR_IMPL_TRIVIAL 1 |
| 21 | #define AMR_IMPL_PATCHES 2 |
| 22 | #define AMR_IMPL_OPENVDB 3 |
| 23 | |
| 24 | template<typename Decomposition, typename garray> |
| 25 | class Decomposition_encap |
| 26 | { |
| 27 | Decomposition & dec; |
| 28 | garray & gd_array; |
| 29 | |
| 30 | public: |
| 31 | |
| 32 | Decomposition_encap(Decomposition & dec, garray & gd_array) |
| 33 | :dec(dec),gd_array(gd_array) |
| 34 | {} |
| 35 | |
| 36 | Decomposition & internal_dec() const |
| 37 | { |
| 38 | return dec; |
| 39 | } |
| 40 | |
/*! \brief Start the decomposition and propagate it to all the grid levels
 *
 */
| 44 | void decompose() |
| 45 | { |
| 46 | dec.decompose(); |
| 47 | |
| 48 | for(size_t i = 0 ; i < gd_array.size() ; i++) |
| 49 | { |
| 50 | Ghost<Decomposition::dims,typename Decomposition::stype> gold = gd_array.get(i).getDecomposition().getGhost(); |
| 51 | gd_array.get(i).getDecomposition() = dec.duplicate(gold); |
| 52 | } |
| 53 | } |
| 54 | |
/*! \brief Refine the decomposition and propagate it to all the grid levels.
 * Available only for the ParMetis distribution; for Metis it is a null call
 *
 * \param ts number of time steps since the previous load balancing
 *
 */
| 60 | void refine(size_t ts) |
| 61 | { |
| 62 | dec.refine(); |
| 63 | |
| 64 | for(size_t i = 0 ; i < gd_array.size() ; i++) |
| 65 | { |
| 66 | Ghost<Decomposition::dims,typename Decomposition::stype> gold = gd_array.get(i).getDecomposition().getGhost(); |
| 67 | gd_array.get(i).getDecomposition() = dec.duplicate(gold); |
| 68 | } |
| 69 | } |
| 70 | |
/*! \brief Redecompose the domain and propagate the result to all the grid levels.
 * Available only for the ParMetis distribution; for Metis it is a null call
 *
 * \param ts number of time steps since the previous load balancing
 *
 */
| 76 | void redecompose(size_t ts) |
| 77 | { |
| 78 | dec.redecompose(); |
| 79 | |
| 80 | for(size_t i = 0 ; i < gd_array.size() ; i++) |
| 81 | { |
| 82 | Ghost<Decomposition::dims,typename Decomposition::stype> gold = gd_array.get(i).getDecomposition().getGhost(); |
| 83 | gd_array.get(i).getDecomposition() = dec.duplicate(gold); |
| 84 | } |
| 85 | } |
| 86 | |
/*! \brief Get the distribution object of the internal decomposition
 *
 * \return the distribution
 *
 */
auto getDistribution() -> decltype(dec.getDistribution())
| 88 | { |
| 89 | return dec.getDistribution(); |
| 90 | } |
| 91 | |
/*! \brief Copy the decompositions of another encapsulated object into the grids of this one
 *
 * \param de source encapsulated decomposition
 *
 * \return itself
 *
 */
Decomposition_encap<Decomposition,garray> operator=(const Decomposition_encap<Decomposition,garray> & de) const
| 93 | { |
| 94 | for(size_t i = 0 ; i < gd_array.size() ; i++) |
| 95 | {gd_array.get(i).getDecomposition() = de.gd_array.get(i).getDecomposition();} |
| 96 | |
| 97 | return *this; |
| 98 | } |
| 99 | |
/*! \brief Write the decomposition to file
 *
 * \param output output filename
 *
 * \return true if the write succeeded
 *
 */
bool write(std::string output) const
| 101 | { |
| 102 | return dec.write(output); |
| 103 | } |
| 104 | }; |
| 105 | |
| 106 | template<unsigned int dim, |
| 107 | typename St, |
| 108 | typename T, |
| 109 | unsigned int impl=AMR_IMPL_TRIVIAL , |
| 110 | typename Decomposition = CartDecomposition<dim,St>, |
| 111 | typename Memory=HeapMemory , |
| 112 | typename device_grid=grid_cpu<dim,T> > |
| 113 | class grid_dist_amr |
| 114 | { |
| 115 | |
| 116 | }; |
| 117 | |
| 118 | |
/*! \brief AMR Adaptive Multi Resolution Grid
 *
 * \tparam dim Dimensionality
 * \tparam St type of space
 * \tparam T what each point of the grid stores
 * \tparam Decomposition type of decomposition
 * \tparam Memory type of memory used to allocate the grids
 * \tparam device_grid type of the underlying local grid
 *
 */
| 127 | template<unsigned int dim, |
| 128 | typename St, |
| 129 | typename T, |
| 130 | typename Decomposition, |
| 131 | typename Memory, |
| 132 | typename device_grid > |
| 133 | class grid_dist_amr<dim,St,T,AMR_IMPL_TRIVIAL,Decomposition,Memory,device_grid> |
| 134 | { |
| 135 | //! Simulation domain |
| 136 | Box<dim,St> domain; |
| 137 | |
| 138 | //! Ghost integer |
| 139 | Ghost<dim,long int> g_int; |
| 140 | |
| 141 | //! Boundary conditions of the structure |
| 142 | periodicity<dim> bc; |
| 143 | |
//! Array of grids, one per refinement level
| 146 | openfpm::vector<grid_dist_id<dim,St,T,Decomposition,Memory,device_grid>, |
| 147 | HeapMemory, |
| 148 | memory_traits_lin, |
| 149 | openfpm::grow_policy_identity,STD_VECTOR> gd_array; |
| 150 | |
| 151 | //! Type of structure sub-grid iterator |
| 152 | typedef decltype(device_grid::type_of_subiterator()) device_sub_it; |
| 153 | |
| 154 | //! Type of structure for the grid iterator |
| 155 | typedef decltype(device_grid::type_of_iterator()) device_it; |
| 156 | |
| 157 | //! Domain iterator for each distributed grid |
| 158 | openfpm::vector<grid_dist_iterator<dim,device_grid,device_sub_it,FREE>> git; |
| 159 | |
| 160 | //! Domain and ghost iterator for each distributed grid |
| 161 | openfpm::vector<grid_dist_iterator<dim,device_grid,device_it,FIXED>> git_g; |
| 162 | |
//! Sub-grid iterator for each distributed grid
| 164 | openfpm::vector<grid_dist_iterator_sub<dim,device_grid>> git_sub; |
| 165 | |
| 166 | //! Moving offsets |
| 167 | openfpm::vector<openfpm::vector<offset_mv<dim>>> mv_off; |
| 168 | |
//! Background value
| 170 | T bck; |
| 171 | |
/*! \brief Initialize the other levels
 *
 * \param n_lvl number of levels
 * \param g_sz_lvl grid size on each level
 *
 */
| 178 | void initialize_other(size_t n_lvl, size_t (& g_sz_lvl)[dim]) |
| 179 | { |
| 180 | for (size_t i = 0; i < n_lvl - 1 ; i++) |
| 181 | { |
| 182 | for (size_t j = 0 ; j < dim ; j++) |
| 183 | { |
| 184 | if (bc.bc[j] == NON_PERIODIC) |
| 185 | {g_sz_lvl[j] = (g_sz_lvl[j]-1)*2 + 1;} |
| 186 | else |
| 187 | {g_sz_lvl[j] = g_sz_lvl[j]*2;} |
| 188 | } |
| 189 | |
| 190 | gd_array.add(grid_dist_id<dim,St,T,Decomposition,Memory,device_grid>(gd_array.get(0).getDecomposition(),g_sz_lvl,g_int)); |
| 191 | gd_array.last().setBackgroundValue(bck); |
| 192 | |
| 193 | gd_array.last().getDecomposition().free_geo_cell(); |
| 194 | gd_array.last().getDecomposition().getDistribution().destroy_internal_graph(); |
| 195 | gd_array.last().getDecomposition().free_fines(); |
| 196 | } |
| 197 | |
| 198 | recalculate_mvoff(); |
| 199 | } |
| 200 | |
| 201 | public: |
| 202 | |
| 203 | |
| 204 | /*! \brief Constructor |
| 205 | * |
| 206 | * \param domain Simulation domain |
| 207 | * \param g ghost extension |
| 208 | * |
| 209 | */ |
| 210 | grid_dist_amr(const Box<dim,St> & domain, const Ghost<dim,long int> & g) |
| 211 | :domain(domain),g_int(g) |
| 212 | { |
// set boundary conditions to non-periodic
| 214 | |
| 215 | for (size_t i = 0; i < dim ; i++) |
| 216 | {bc.bc[i] = NON_PERIODIC;} |
| 217 | } |
| 218 | |
| 219 | /*! \brief Constructor |
| 220 | * |
| 221 | * \param domain Simulation domain |
| 222 | * \param g ghost extension |
| 223 | * \param bc boundary conditions |
| 224 | * |
| 225 | */ |
| 226 | grid_dist_amr(const Box<dim,St> & domain, const Ghost<dim,long int> & g, periodicity<dim> & bc) |
| 227 | :domain(domain),g_int(g),bc(bc) |
| 228 | { |
| 229 | } |
| 230 | |
/*! \brief Initialize the amr grid
 *
 * \param dec Decomposition (this parameter is useful in case we want to constrain the AMR to an external decomposition)
 * \param n_lvl maximum number of levels (0 means no additional levels)
 * \param g_sz coarsest grid size in each direction
 *
 */
| 238 | void initLevels(const Decomposition & dec, size_t n_lvl,const size_t (& g_sz)[dim]) |
| 239 | { |
| 240 | size_t g_sz_lvl[dim]; |
| 241 | |
| 242 | for (size_t i = 0; i < dim ; i++) |
| 243 | {g_sz_lvl[i] = g_sz[i];} |
| 244 | |
| 245 | // Add the coarse level |
| 246 | gd_array.add(grid_dist_id<dim,St,T,Decomposition,Memory,device_grid>(dec,g_sz,g_int)); |
| 247 | gd_array.last().setBackgroundValue(bck); |
| 248 | |
| 249 | initialize_other(n_lvl,g_sz_lvl); |
| 250 | } |
| 251 | |
/*! \brief Initialize the amr grid
 *
 * \param dec Decomposition (this parameter is useful in case we want to constrain the AMR to an external decomposition)
 * \param n_lvl maximum number of levels (0 means no additional levels)
 * \param g_sz coarsest grid size in each direction
 *
 */
| 259 | template<typename TT> void initLevels(const Decomposition_encap<Decomposition,TT> & dec, size_t n_lvl,const size_t (& g_sz)[dim]) |
| 260 | { |
| 261 | initLevels(dec.internal_dec(),n_lvl,g_sz); |
| 262 | } |
| 263 | |
/*! \brief Recalculate the offset arrays used by moveLvlUp and moveLvlDw
| 265 | * |
| 266 | * |
| 267 | * |
| 268 | */ |
| 269 | void recalculate_mvoff() |
| 270 | { |
// Here we calculate the offsets needed to move keys between levels.
// In global coordinates, moving to the finer level multiplies the coordinates
// by 2 and moving to the coarser level divides them by 2. The same holds in
// local coordinates, except that the decomposition can introduce an additional
// offset; look at the picture below
| 276 | // |
| 277 | // (-1) (0) |
| 278 | // * | * * coarse level |
| 279 | // * |* * * * finer level |
| 280 | // |(0)(1) |
| 281 | // |
| 282 | // Line of the decomposition |
| 283 | // |
// The coarse-level point 0 in local coordinates, converted to the finer level, is not
// just 2*0 = 0 but 2*(0) + 1, so a formula of the form 2*x + offset is required; here we
// calculate these offsets. Moving from the finer to the coarser level works the same
// way, with the formula Integer_round(x+1)/2 - 1
//
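// A short worked example of the formulas above (the numbers are purely
// illustrative): suppose that along one dimension the local patch origin is 3
// on the coarse level and 5 on the finer level. The stored offset is then
// 2*3 - 5 = 1, so the coarse local point x = 4 (global 7, global fine 14)
// maps to the fine local point 2*4 + 1 = 9, and going back gives
// (9 - 1) / 2 = 4 as expected.
//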
| 289 | mv_off.resize(gd_array.size()); |
| 290 | |
| 291 | for (size_t i = 1 ; i < gd_array.size() ; i++) |
| 292 | { |
| 293 | auto & g_box_c = gd_array.get(i-1).getLocalGridsInfo(); |
| 294 | auto & g_box_f = gd_array.get(i).getLocalGridsInfo(); |
| 295 | |
| 296 | #ifdef SE_CLASS1 |
| 297 | |
| 298 | if (g_box_c.size() != g_box_f.size()) |
| 299 | { |
std::cerr << __FILE__ << ":" << __LINE__ << " error: it seems that the AMR construction between levels " <<
i << " and " << i-1 << " is inconsistent" << std::endl;
| 302 | } |
| 303 | |
| 304 | #endif |
| 305 | |
| 306 | mv_off.get(i-1).resize(g_box_f.size()); |
| 307 | mv_off.get(i).resize(g_box_f.size()); |
| 308 | |
| 309 | for (size_t j = 0 ; j < g_box_f.size() ; j++) |
| 310 | { |
| 311 | for (size_t s = 0 ; s < dim ; s++) |
| 312 | { |
| 313 | size_t d_orig_c = g_box_c.get(j).origin.get(s); |
| 314 | size_t d_orig_f = g_box_f.get(j).origin.get(s); |
| 315 | |
| 316 | mv_off.get(i-1).get(j).dw.get(s) = d_orig_c*2 - d_orig_f; |
| 317 | mv_off.get(i).get(j).up.get(s) = d_orig_c*2 - d_orig_f; |
| 318 | } |
| 319 | } |
| 320 | } |
| 321 | } |
| 322 | |
/*! \brief Initialize the amr grid
 *
 * \param n_lvl maximum number of levels (0 means no additional levels)
 * \param g_sz coarsest grid size in each direction
 * \param opt options
 *
 */
| 330 | void initLevels(size_t n_lvl,const size_t (& g_sz)[dim], size_t opt = 0) |
| 331 | { |
| 332 | size_t g_sz_lvl[dim]; |
| 333 | |
| 334 | for (size_t i = 0; i < dim ; i++) |
| 335 | {g_sz_lvl[i] = g_sz[i];} |
| 336 | |
| 337 | // Add the coarse level |
| 338 | gd_array.add(grid_dist_id<dim,St,T,Decomposition,Memory,device_grid>(g_sz,domain,g_int,bc,opt)); |
| 339 | |
| 340 | initialize_other(n_lvl,g_sz_lvl); |
| 341 | } |
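
/* A minimal usage sketch (illustrative only; it assumes a 2D grid storing a
 * single float property through the usual OpenFPM aggregate<> container and
 * the Box/Ghost constructors shown below):
 *
 *   Box<2,float> domain({0.0,0.0},{1.0,1.0});
 *   Ghost<2,long int> g(1);
 *   size_t sz[2] = {17,17};
 *
 *   grid_dist_amr<2,float,aggregate<float>> amr(domain,g);
 *   amr.initLevels(4,sz);    // initialize the resolution levels (4 in this sketch)
 */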
| 342 | |
/*! \brief Add the computation cost on the decomposition using a resolution function
 *
 *
 * \param md Model to use
 * \param ts optional parameter, approximately the number of ghost_get calls between two
 *        rebalancing steps; at the first decomposition it can be ignored (default = 1) because it is not used
 *
 */
| 351 | template <typename Model>inline void addComputationCosts(Model md=Model(), size_t ts = 1) |
| 352 | { |
| 353 | gd_array.get(0).addComputationCosts(md,ts); |
| 354 | } |
| 355 | |
/*! \brief Get the object that stores the information about the decomposition
| 357 | * |
| 358 | * \return the decomposition object |
| 359 | * |
| 360 | */ |
| 361 | Decomposition_encap<Decomposition,decltype(gd_array)> getDecomposition() |
| 362 | { |
| 363 | Decomposition_encap<Decomposition,decltype(gd_array)> tmp(gd_array.get(0).getDecomposition(),gd_array); |
| 364 | |
| 365 | return tmp; |
| 366 | } |
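
/* A sketch of how the encapsulated decomposition is typically used (assuming
 * an already initialized AMR grid `amr`): decompose() goes through
 * Decomposition_encap, so every level receives the new decomposition, after
 * which the data can be migrated and the ghost layers refreshed.
 *
 *   amr.getDecomposition().decompose();
 *   amr.map();
 *   amr.template ghost_get<0>();
 */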
| 367 | |
| 368 | /*! \brief Get the underlying grid level |
| 369 | * |
| 370 | * \param lvl level |
| 371 | * |
| 372 | * \return the grid level |
| 373 | * |
| 374 | */ |
| 375 | grid_dist_id<dim,St,T,Decomposition,Memory,device_grid> & getLevel(size_t lvl) |
| 376 | { |
| 377 | return gd_array.get(lvl); |
| 378 | } |
| 379 | |
| 380 | |
/*! \brief Return a domain iterator over the cells of all the levels
 *
 * On every level the iterator skips the last point of each non-periodic direction,
 * so that it iterates over cells rather than points
 *
 * \return an iterator over the cells of every level
 *
 */
grid_dist_amr_key_iterator<dim,device_grid,
decltype(grid_dist_id<dim,St,T,Decomposition,Memory,device_grid>::type_of_subiterator()),
decltype(grid_dist_id<dim,St,T,Decomposition,Memory,device_grid>::type_of_subiterator()) >
getDomainIteratorCells()
| 385 | { |
| 386 | git_sub.clear(); |
| 387 | |
| 388 | for (size_t i = 0 ; i < gd_array.size() ; i++) |
| 389 | { |
| 390 | grid_key_dx<dim> start; |
| 391 | grid_key_dx<dim> stop; |
| 392 | |
| 393 | for (size_t j = 0 ; j < dim ; j++) |
| 394 | { |
| 395 | start.set_d(j,0); |
| 396 | if (bc.bc[j] == NON_PERIODIC) |
| 397 | {stop.set_d(j,getGridInfoVoid(i).size(j) - 2);} |
| 398 | else |
| 399 | {stop.set_d(j,getGridInfoVoid(i).size(j) - 1);} |
| 400 | } |
| 401 | |
| 402 | git_sub.add(gd_array.get(i).getSubDomainIterator(start,stop)); |
| 403 | } |
| 404 | |
| 405 | return grid_dist_amr_key_iterator<dim,device_grid, |
| 406 | decltype(grid_dist_id<dim,St,T,Decomposition,Memory,device_grid>::type_of_subiterator()), |
| 407 | decltype(grid_dist_id<dim,St,T,Decomposition,Memory,device_grid>::type_of_subiterator())>(git_sub); |
| 408 | } |
| 409 | |
/*! \brief Return a domain iterator over the cells of the level lvl
 *
 * \param lvl level
 *
 * \return an iterator over the cells of the selected level
 *
 */
grid_dist_iterator_sub<dim,device_grid> getDomainIteratorCells(size_t lvl)
| 411 | { |
| 412 | grid_key_dx<dim> start; |
| 413 | grid_key_dx<dim> stop; |
| 414 | |
| 415 | for (size_t j = 0 ; j < dim ; j++) |
| 416 | { |
| 417 | start.set_d(j,0); |
| 418 | if (bc.bc[j] == NON_PERIODIC) |
| 419 | {stop.set_d(j,getGridInfoVoid(lvl).size(j) - 2);} |
| 420 | else |
| 421 | {stop.set_d(j,getGridInfoVoid(lvl).size(j) - 1);} |
| 422 | } |
| 423 | |
| 424 | return gd_array.get(lvl).getSubDomainIterator(start,stop); |
| 425 | } |
| 426 | |
/*! \brief Get an iterator over the grid points of the level lvl, ghost region included
 *
 * \param lvl level
 *
 * \return an iterator over the grid and its ghost
 *
 */
| 432 | auto getGridGhostIterator(size_t lvl) -> decltype(gd_array.get(lvl).getGridGhostIterator(grid_key_dx<dim>(),grid_key_dx<dim>())) |
| 433 | { |
| 434 | grid_key_dx<dim> key_start; |
| 435 | grid_key_dx<dim> key_stop; |
| 436 | |
| 437 | for (size_t i = 0 ; i < dim ; i++) |
| 438 | { |
| 439 | key_start.set_d(i,g_int.getLow(i)); |
| 440 | key_stop.set_d(i,g_int.getHigh(i) + getGridInfoVoid(lvl).size(i) -1); |
| 441 | } |
| 442 | |
| 443 | return gd_array.get(lvl).getGridGhostIterator(key_start,key_stop); |
| 444 | } |
| 445 | |
/*! \brief Get an iterator over the grid points of the level lvl
 *
 * \param lvl level
 *
 * \return an iterator to the grid
 *
 */
| 451 | auto getGridIterator(size_t lvl) -> decltype(gd_array.get(lvl).getGridIterator()) |
| 452 | { |
| 453 | return gd_array.get(lvl).getGridIterator(); |
| 454 | } |
| 455 | |
/*! \brief Get an iterator over the grid points of the level lvl restricted to the box start-stop
 *
 * \param lvl level
 * \param start starting point
 * \param stop stop point
 *
 * \return an iterator to the grid
 *
 */
| 461 | auto getGridIterator(size_t lvl, grid_key_dx<dim> & start, grid_key_dx<dim> & stop) -> decltype(gd_array.get(lvl).getGridIterator(start,stop)) |
| 462 | { |
| 463 | return gd_array.get(lvl).getGridIterator(start,stop); |
| 464 | } |
| 465 | |
| 466 | #ifdef __NVCC__ |
| 467 | |
/*! \brief Get a GPU iterator over the grid points of the level lvl
 *
 * \param lvl level
 *
 * \return an iterator to the grid usable on the device
 *
 */
| 473 | auto getGridIteratorGPU(size_t lvl) -> decltype(gd_array.get(lvl).getGridIteratorGPU()) |
| 474 | { |
| 475 | return gd_array.get(lvl).getGridIteratorGPU(); |
| 476 | } |
| 477 | |
| 478 | #endif |
| 479 | |
/*! \brief Get an iterator over the cells of the level lvl
 *
 * \param lvl level
 *
 * \return an iterator over the cells of the grid
 *
 */
| 485 | auto getGridIteratorCells(size_t lvl) -> decltype(gd_array.get(lvl).getGridIterator()) |
| 486 | { |
| 487 | grid_key_dx<dim> start; |
| 488 | grid_key_dx<dim> stop; |
| 489 | |
| 490 | for (size_t j = 0 ; j < dim ; j++) |
| 491 | { |
| 492 | start.set_d(j,0); |
| 493 | if (bc.bc[j] == NON_PERIODIC) |
| 494 | {stop.set_d(j,getGridInfoVoid(lvl).size(j) - 2);} |
| 495 | else |
| 496 | {stop.set_d(j,getGridInfoVoid(lvl).size(j) - 1);} |
| 497 | } |
| 498 | |
| 499 | return gd_array.get(lvl).getGridIterator(start,stop); |
| 500 | } |
| 501 | |
| 502 | |
| 503 | /*! \brief return an iterator over the level lvl |
| 504 | * |
| 505 | * \param lvl level |
| 506 | * |
| 507 | * \return an iterator over the level lvl selected |
| 508 | * |
| 509 | */ |
| 510 | grid_dist_iterator<dim,device_grid,decltype(device_grid::type_of_subiterator()),FREE> |
| 511 | getDomainIterator(size_t lvl) const |
| 512 | { |
| 513 | return gd_array.get(lvl).getDomainIterator(); |
| 514 | } |
| 515 | |
| 516 | /*! \brief return an iterator over the level lvl |
| 517 | * |
| 518 | * \param lvl level |
| 519 | * |
| 520 | * \return an iterator over the level lvl selected |
| 521 | * |
| 522 | */ |
| 523 | grid_dist_iterator<dim,device_grid, |
| 524 | decltype(device_grid::type_of_iterator()), |
| 525 | FIXED> |
| 526 | getDomainGhostIterator(size_t lvl) const |
| 527 | { |
| 528 | return gd_array.get(lvl).getDomainGhostIterator(); |
| 529 | } |
| 530 | |
| 531 | /*! \brief Get domain iterator |
| 532 | * |
| 533 | * \return an iterator over all the grid levels |
| 534 | * |
| 535 | */ |
| 536 | grid_dist_amr_key_iterator<dim,device_grid, decltype(device_grid::type_of_subiterator())> |
| 537 | getDomainIterator() |
| 538 | { |
| 539 | git.clear(); |
| 540 | |
| 541 | for (size_t i = 0 ; i < gd_array.size() ; i++) |
| 542 | { |
| 543 | git.add(gd_array.get(i).getDomainIterator()); |
| 544 | } |
| 545 | |
| 546 | return grid_dist_amr_key_iterator<dim,device_grid,decltype(device_grid::type_of_subiterator())>(git); |
| 547 | } |
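
/* A minimal iteration sketch over all the levels at once (it assumes the usual
 * OpenFPM iterator interface isNext()/get()/operator++ on the returned AMR
 * iterator and a float as property 0):
 *
 *   auto it = amr.getDomainIterator();
 *
 *   while (it.isNext())
 *   {
 *       auto key = it.get();               // grid_dist_amr_key<dim>
 *
 *       amr.template get<0>(key) = 1.0;    // write property 0 on every level
 *
 *       ++it;
 *   }
 */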
| 548 | |
| 549 | /*! \brief Get domain iterator |
| 550 | * |
| 551 | * \return an iterator over all the grid levels |
| 552 | * |
| 553 | */ |
| 554 | grid_dist_amr_key_iterator<dim,device_grid, decltype(device_grid::type_of_iterator()), |
| 555 | grid_dist_iterator<dim,device_grid,decltype(device_grid::type_of_iterator()),FIXED>> |
| 556 | getDomainGhostIterator() |
| 557 | { |
| 558 | git_g.clear(); |
| 559 | |
| 560 | for (size_t i = 0 ; i < gd_array.size() ; i++) |
| 561 | { |
| 562 | git_g.add(gd_array.get(i).getDomainGhostIterator()); |
| 563 | } |
| 564 | |
| 565 | return grid_dist_amr_key_iterator<dim,device_grid,decltype(device_grid::type_of_iterator()), |
| 566 | grid_dist_iterator<dim,device_grid,decltype(device_grid::type_of_iterator()),FIXED>>(git_g); |
| 567 | } |
| 568 | |
| 569 | /*! \brief Get the reference of the selected element |
| 570 | * |
| 571 | * \tparam p property to get (is an integer) |
 * \param v1 grid_key that identifies the element in the grid
| 573 | * |
| 574 | * \return the selected element |
| 575 | * |
| 576 | */ |
| 577 | template <unsigned int p>inline auto get(const grid_dist_amr_key<dim> & v1) const -> typename std::add_lvalue_reference<decltype(gd_array.get(v1.getLvl()).template get<p>(v1.getKey()))>::type |
| 578 | { |
| 579 | #ifdef SE_CLASS2 |
| 580 | check_valid(this,8); |
| 581 | #endif |
| 582 | return gd_array.get(v1.getLvl()).template get<p>(v1.getKey()); |
| 583 | } |
| 584 | |
| 585 | /*! \brief Get the reference of the selected element |
| 586 | * |
| 587 | * \tparam p property to get (is an integer) |
 * \param v1 grid_key that identifies the element in the grid
| 589 | * |
| 590 | * \return the selected element |
| 591 | * |
| 592 | */ |
| 593 | template <unsigned int p>inline auto get(const grid_dist_amr_key<dim> & v1) -> typename std::add_lvalue_reference<decltype(gd_array.get(v1.getLvl()).template get<p>(v1.getKey()))>::type |
| 594 | { |
| 595 | #ifdef SE_CLASS2 |
| 596 | check_valid(this,8); |
| 597 | #endif |
| 598 | return gd_array.get(v1.getLvl()).template get<p>(v1.getKey()); |
| 599 | } |
| 600 | |
| 601 | |
| 602 | /*! \brief Get the reference of the selected element |
| 603 | * |
| 604 | * \tparam p property to get (is an integer) |
 * \param v1 grid_key that identifies the element in the grid
| 606 | * |
| 607 | * \return the selected element |
| 608 | * |
| 609 | */ |
| 610 | template <unsigned int p>inline auto get(size_t lvl, const grid_dist_key_dx<dim> & v1) const -> typename std::add_lvalue_reference<decltype(gd_array.get(lvl).template get<p>(v1))>::type |
| 611 | { |
| 612 | #ifdef SE_CLASS2 |
| 613 | check_valid(this,8); |
| 614 | #endif |
| 615 | return gd_array.get(lvl).template get<p>(v1); |
| 616 | } |
| 617 | |
| 618 | /*! \brief Get the reference of the selected element |
| 619 | * |
| 620 | * \tparam p property to get (is an integer) |
 * \param v1 grid_key that identifies the element in the grid
| 622 | * |
| 623 | * \return the selected element |
| 624 | * |
| 625 | */ |
| 626 | template <unsigned int p>inline auto get(size_t lvl, const grid_dist_key_dx<dim> & v1) -> typename std::add_lvalue_reference<decltype(gd_array.get(lvl).template get<p>(v1))>::type |
| 627 | { |
| 628 | #ifdef SE_CLASS2 |
| 629 | check_valid(this,8); |
| 630 | #endif |
| 631 | return gd_array.get(lvl).template get<p>(v1); |
| 632 | } |
| 633 | |
| 634 | //////////////////// Insert functions |
| 635 | |
| 636 | |
/*! \brief Get the reference of the selected element, inserting the point if it does not exist
 *
 * This function makes sense only in case of sparse grids
 *
 * \tparam p property to get (is an integer)
 * \param v1 grid_key that identifies the element in the grid
 *
 * \return the selected element
 *
 */
| 645 | template <unsigned int p> |
| 646 | inline auto insert(const grid_dist_amr_key<dim> & v1) |
| 647 | -> typename std::add_lvalue_reference<decltype(gd_array.get(v1.getLvl()).template insert<p>(v1.getKey()))>::type |
| 648 | { |
| 649 | #ifdef SE_CLASS2 |
| 650 | check_valid(this,8); |
| 651 | #endif |
| 652 | return gd_array.get(v1.getLvl()).template insert<p>(v1.getKey()); |
| 653 | } |
| 654 | |
| 655 | |
| 656 | |
/*! \brief Get the reference of the selected element, inserting the point if it does not exist
 *
 * This function makes sense only in case of sparse grids
 *
 * \tparam p property to get (is an integer)
 * \param lvl level
 * \param v1 grid_key that identifies the element in the grid
 *
 * \return the selected element
 *
 */
| 665 | template <unsigned int p>inline auto insert(size_t lvl, const grid_dist_key_dx<dim> & v1) |
| 666 | -> typename std::add_lvalue_reference<decltype(gd_array.get(lvl).template insert<p>(v1))>::type |
| 667 | { |
| 668 | #ifdef SE_CLASS2 |
| 669 | check_valid(this,8); |
| 670 | #endif |
| 671 | return gd_array.get(lvl).template insert<p>(v1); |
| 672 | } |
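
/* A sketch of the insertion pattern for sparse grids (assumptions: a level
 * index lvl, a sparse device_grid such as the one used by sgrid_dist_amr, and
 * a grid iterator exposing get_dist() to obtain the distributed key, which is
 * the usual OpenFPM convention but is not guaranteed by this header):
 *
 *   grid_key_dx<dim> start;    // fill with the box of points to populate
 *   grid_key_dx<dim> stop;
 *
 *   auto it = amr.getGridIterator(lvl,start,stop);
 *
 *   while (it.isNext())
 *   {
 *       auto key = it.get_dist();
 *
 *       amr.template insert<0>(lvl,key) = 1.0;
 *
 *       ++it;
 *   }
 */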
| 673 | |
| 674 | ////////////////////////////////////// |
| 675 | |
| 676 | /*! \brief Get the internal distributed grid |
| 677 | * |
| 678 | * \param lvl level |
| 679 | * |
| 680 | * \return the internal distributed grid |
| 681 | * |
| 682 | */ |
| 683 | grid_dist_id<dim,St,T,Decomposition,Memory,device_grid> & getDistGrid(size_t lvl) |
| 684 | { |
| 685 | return gd_array.get(lvl); |
| 686 | } |
| 687 | |
| 688 | //////////////////// Remove functions |
| 689 | |
/*! \brief Remove a grid point (this function makes sense only in case of
 * sparse grids)
 *
 * \param v1 grid_key that identifies the element in the AMR grid to eliminate
 *
 */
| 696 | inline void remove(const grid_dist_amr_key<dim> & v1) |
| 697 | { |
| 698 | #ifdef SE_CLASS2 |
| 699 | check_valid(this,8); |
| 700 | #endif |
| 701 | return gd_array.get(v1.getLvl()).remove(v1.getKey()); |
| 702 | } |
| 703 | |
/*! \brief Remove a grid point (this function makes sense only in case of
 * sparse grids)
 *
 * \param lvl level
 * \param v1 grid_key that identifies the element in the grid to eliminate
 *
 */
| 710 | void remove(size_t lvl, const grid_dist_key_dx<dim> & v1) |
| 711 | { |
| 712 | #ifdef SE_CLASS2 |
| 713 | check_valid(this,8); |
| 714 | #endif |
| 715 | return gd_array.get(lvl).remove(v1); |
| 716 | } |
| 717 | |
| 718 | /*! \brief construct level connections for padding particles |
| 719 | * |
| 720 | * |
| 721 | */ |
| 722 | void construct_level_connections() |
| 723 | { |
for (int lvl = 0 ; lvl < (int)gd_array.size() ; lvl++)
{
// with a single level there are no inter-level connections to construct
if (gd_array.size() == 1)
{break;}

if (lvl == 0)
{
gd_array.get(lvl).construct_link_dw(gd_array.get(lvl+1),mv_off.get(lvl));
| 729 | } |
| 730 | else if (lvl == gd_array.size() - 1) |
| 731 | {gd_array.get(lvl).construct_link_up(gd_array.get(lvl-1),mv_off.get(lvl));} |
| 732 | else |
| 733 | { |
| 734 | gd_array.get(lvl).construct_link_dw(gd_array.get(lvl+1),mv_off.get(lvl)); |
| 735 | gd_array.get(lvl).construct_link_up(gd_array.get(lvl-1),mv_off.get(lvl)); |
| 736 | } |
| 737 | } |
| 738 | } |
| 739 | |
/*! \brief Tag the boundary points on every level
 *
 * \tparam stencil_type type of stencil
 *
 */
| 745 | template<typename stencil_type> |
| 746 | void tagBoundaries() |
| 747 | { |
| 748 | for (int lvl = 0 ; lvl < gd_array.size() ; lvl++) |
| 749 | { |
| 750 | gd_array.get(lvl).template tagBoundaries<stencil_type>(); |
| 751 | } |
| 752 | } |
| 753 | |
| 754 | ////////////////////////////////////// |
| 755 | |
| 756 | /*! \brief It synchronize the ghost parts |
| 757 | * |
| 758 | * \tparam prp... Properties to synchronize |
| 759 | * |
| 760 | */ |
| 761 | template<int... prp> void ghost_get(size_t opt = 0) |
| 762 | { |
| 763 | for (size_t i = 0 ; i < gd_array.size() ; i++) |
| 764 | { |
| 765 | gd_array.get(i).template ghost_get<prp...>(opt); |
| 766 | } |
| 767 | } |
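
/* A typical update/synchronization sketch (illustrative only): modify the
 * domain points of every level, then refresh the ghost layers of property 0
 * before reading neighbor values again.
 *
 *   // ... write property 0 through getDomainIterator() ...
 *   amr.template ghost_get<0>();
 */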
| 768 | |
/*! \brief It moves all the grid parts that do not belong to the local processor to the respective processor
 *
 */
| 772 | void map(size_t opt = 0) |
| 773 | { |
| 774 | for (size_t i = 0 ; i < gd_array.size() ; i++) |
| 775 | { |
| 776 | gd_array.get(i).map(); |
| 777 | } |
| 778 | |
| 779 | recalculate_mvoff(); |
| 780 | } |
| 781 | |
| 782 | /*! \brief Apply the ghost put |
| 783 | * |
| 784 | * \tparam prp... Properties to apply ghost put |
| 785 | * |
| 786 | */ |
| 787 | template<template<typename,typename> class op,int... prp> void ghost_put() |
| 788 | { |
| 789 | for (size_t i = 0 ; i < gd_array.size() ; i++) |
| 790 | { |
| 791 | gd_array.get(i).template ghost_put<op,prp...>(); |
| 792 | } |
| 793 | } |
| 794 | |
/*! \brief Return the number of inserted points on a particular level
 *
 * \param lvl level
 *
 * \return the number of inserted points
 *
 */
| 800 | size_t size_inserted(size_t lvl) |
| 801 | { |
| 802 | return gd_array.get(lvl).size_local_inserted(); |
| 803 | } |
| 804 | |
/*! \brief Set the background value
 *
 * This function makes sense only in case of sparse grids; in case of dense
 * grids it does nothing
 *
 * \param bv background value
 *
 */
| 811 | void setBackgroundValue(T & bv) |
| 812 | { |
| 813 | for (size_t i = 0 ; i < getNLvl() ; i++) |
| 814 | {gd_array.get(i).setBackgroundValue(bv);} |
| 815 | |
| 816 | meta_copy<T>::meta_copy_(bv,bck); |
| 817 | } |
| 818 | |
/*! \brief Delete all the points in the grid
 *
 * In case of sparse grids it deletes all the inserted points; in case
 * of dense grids it does nothing
 *
 */
| 825 | void clear() |
| 826 | { |
| 827 | for (size_t i = 0 ; i < getNLvl() ; i++) |
| 828 | {gd_array.get(i).clear();} |
| 829 | } |
| 830 | |
/*! \brief Get an object containing the grid information for a specific level
| 832 | * |
| 833 | * \param lvl level |
| 834 | * |
| 835 | * \return an information object about this grid |
| 836 | * |
| 837 | */ |
| 838 | const grid_sm<dim,void> & getGridInfoVoid(size_t lvl) const |
| 839 | { |
| 840 | return gd_array.get(lvl).getGridInfoVoid(); |
| 841 | } |
| 842 | |
| 843 | /*! \brief Return the maximum number of levels in the AMR struct |
| 844 | * |
| 845 | * \return the number of levels |
| 846 | * |
| 847 | */ |
| 848 | size_t getNLvl() |
| 849 | { |
| 850 | return gd_array.size(); |
| 851 | } |
| 852 | |
/*! \brief Move the key one level down (to the finer level)
 *
 * \param key multi-resolution AMR key
 *
 */
| 858 | void moveLvlDw(grid_dist_amr_key<dim> & key) |
| 859 | { |
| 860 | #ifdef SE_CLASS1 |
| 861 | |
| 862 | if (key.getLvl() >= getNLvl() - 1) |
| 863 | {std::cerr << __FILE__ << ":" << __LINE__ << " error: we are already at the last level, we cannot go one level down" << std::endl;} |
| 864 | |
| 865 | #endif |
| 866 | |
| 867 | auto & key_ref = key.getKeyRef().getKeyRef(); |
| 868 | size_t lvl = key.getLvl(); |
| 869 | |
| 870 | for (size_t i = 0 ; i < dim ; i++) |
| 871 | { |
| 872 | key_ref.set_d(i,(key_ref.get(i) << 1) + mv_off.get(key.getLvl()).get(key.getKeyRef().getSub()).dw.get(i) ); |
| 873 | } |
| 874 | |
| 875 | key.setLvl(lvl+1); |
| 876 | } |
| 877 | |
/*! \brief Return the key moved one level down (to the finer level)
 *
 * \param lvl level of the key
 * \param key distributed grid key
 *
 * \return the corresponding key on the level lvl+1
 *
 */
| 884 | grid_dist_key_dx<dim> moveDw(int lvl, const grid_dist_key_dx<dim> & key) |
| 885 | { |
| 886 | #ifdef SE_CLASS1 |
| 887 | |
| 888 | if (lvl >= getNLvl() - 1) |
| 889 | {std::cerr << __FILE__ << ":" << __LINE__ << " error: we are already at the last level, we cannot go one level down" << std::endl;} |
| 890 | |
| 891 | #endif |
| 892 | |
| 893 | grid_dist_key_dx<dim> out; |
| 894 | |
| 895 | for (size_t i = 0 ; i < dim ; i++) |
| 896 | { |
| 897 | out.getKeyRef().set_d(i,(key.getKeyRef().get(i) << 1) + mv_off.get(lvl).get(key.getSub()).dw.get(i) ); |
| 898 | } |
| 899 | |
| 900 | out.setSub(key.getSub()); |
| 901 | |
| 902 | return out; |
| 903 | } |
| 904 | |
/*! \brief From a distributed key return an AMR key that also contains the grid level
 *
 * \param lvl level
 * \param key distributed key
 *
 * \return the AMR key
 *
 */
| 911 | inline grid_dist_amr_key<dim> getAMRKey(size_t lvl, grid_dist_key_dx<dim> key) |
| 912 | { |
| 913 | return grid_dist_amr_key<dim>(lvl,key); |
| 914 | } |
| 915 | |
/*! \brief Move the key one level up (to the coarser level)
 *
 * \param key multi-resolution AMR key
 *
 */
| 921 | void moveLvlUp(grid_dist_amr_key<dim> & key) |
| 922 | { |
| 923 | #ifdef SE_CLASS1 |
| 924 | |
| 925 | if (key.getLvl() == 0) |
| 926 | {std::cerr << __FILE__ << ":" << __LINE__ << " error: we are already at the top level, we cannot go one level up" << std::endl;} |
| 927 | |
| 928 | #endif |
| 929 | |
| 930 | auto & key_ref = key.getKeyRef().getKeyRef(); |
| 931 | size_t lvl = key.getLvl(); |
| 932 | |
| 933 | for (size_t i = 0 ; i < dim ; i++) |
| 934 | { |
| 935 | key_ref.set_d(i,(key_ref.get(i) - mv_off.get(key.getLvl()).get(key.getKeyRef().getSub()).up.get(i)) >> 1); |
| 936 | } |
| 937 | |
| 938 | key.setLvl(lvl-1); |
| 939 | } |
| 940 | |
/*! \brief Return the key moved one level up (to the coarser level)
 *
 * \param lvl level of the key
 * \param key distributed grid key
 *
 * \return the corresponding key on the level lvl-1
 *
 */
| 947 | grid_dist_key_dx<dim> moveUp(int lvl, const grid_dist_key_dx<dim> & key) |
| 948 | { |
| 949 | #ifdef SE_CLASS1 |
| 950 | |
| 951 | if (lvl == 0) |
| 952 | {std::cerr << __FILE__ << ":" << __LINE__ << " error: we are already at the top level, we cannot go one level up" << std::endl;} |
| 953 | |
| 954 | #endif |
| 955 | |
| 956 | grid_dist_key_dx<dim> out; |
| 957 | |
| 958 | for (size_t i = 0 ; i < dim ; i++) |
| 959 | { |
| 960 | out.getKeyRef().set_d(i,(key.getKeyRef().get(i) - mv_off.get(lvl).get(key.getSub()).up.get(i)) >> 1); |
| 961 | } |
| 962 | |
| 963 | out.setSub(key.getSub()); |
| 964 | |
| 965 | return out; |
| 966 | } |
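
/* A sketch of moving keys across levels, for example to copy the parent
 * (coarser) value onto every point of a finer level lvl > 0 (it assumes the
 * usual OpenFPM iterator interface on the level iterator):
 *
 *   auto it = amr.getDomainIterator(lvl);
 *
 *   while (it.isNext())
 *   {
 *       auto key   = it.get();              // key on level lvl
 *       auto key_c = amr.moveUp(lvl,key);   // same point on level lvl-1
 *
 *       amr.template get<0>(lvl,key) = amr.template get<0>(lvl-1,key_c);
 *
 *       ++it;
 *   }
 */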
| 967 | |
| 968 | /*! \brief Get the position on the grid in global coordinates |
| 969 | * |
| 970 | * \param v1 amr key |
| 971 | * |
| 972 | * \return the position in global coordinates |
| 973 | * |
| 974 | */ |
| 975 | grid_key_dx<dim> getGKey(const grid_dist_amr_key<dim> & v1) |
| 976 | { |
| 977 | return gd_array.get(v1.getLvl()).getGKey(v1.getKey()); |
| 978 | } |
| 979 | |
/*! \brief Return the grid spacing of the level lvl
 *
 * \param lvl level
 *
 * \return the spacing
 *
 */
| 987 | Point<dim,St> getSpacing(size_t lvl) |
| 988 | { |
| 989 | return gd_array.get(lvl).getSpacing(); |
| 990 | } |
| 991 | |
/*! \brief Write the AMR grid on vtk files, one file per level
 *
 * \param output output filename prefix
 * \param opt write options
 *
 * \return true if the write succeeded on every level
 *
 */
| 997 | bool write(std::string output, size_t opt = VTK_WRITER | FORMAT_ASCII ) |
| 998 | { |
| 999 | bool ret = true; |
| 1000 | |
| 1001 | for (size_t i = 0 ; i < gd_array.size() ; i++) |
| 1002 | { |
| 1003 | ret &= gd_array.get(i).write(output + "_" + std::to_string(i),opt); |
| 1004 | } |
| 1005 | |
| 1006 | return ret; |
| 1007 | } |
| 1008 | |
| 1009 | #ifdef __NVCC__ |
| 1010 | |
| 1011 | /*! \brief Move the memory from the device to host memory |
| 1012 | * |
| 1013 | */ |
| 1014 | template<unsigned int ... prp> void deviceToHost() |
| 1015 | { |
| 1016 | for (size_t i = 0 ; i < gd_array.size() ; i++) |
| 1017 | { |
| 1018 | gd_array.get(i).template deviceToHost<prp ...>(); |
| 1019 | } |
| 1020 | } |
| 1021 | |
/*! \brief Move the memory from the host to the device memory
 *
 */
| 1025 | template<unsigned int ... prp> void hostToDevice() |
| 1026 | { |
| 1027 | for (size_t i = 0 ; i < gd_array.size() ; i++) |
| 1028 | { |
| 1029 | gd_array.get(i).template hostToDevice<prp ...>(); |
| 1030 | } |
| 1031 | } |
| 1032 | |
| 1033 | #endif |
| 1034 | }; |
| 1035 | |
| 1036 | template<unsigned int dim, typename St, typename T> |
| 1037 | using sgrid_dist_amr = grid_dist_amr<dim,St,T,AMR_IMPL_TRIVIAL,CartDecomposition<dim,St>,HeapMemory,sgrid_cpu<dim,T,HeapMemory>>; |
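
// A minimal instantiation sketch for the sparse CPU variant (assuming the usual
// OpenFPM aggregate<> property container):
//
//   Box<3,float> domain({0.0,0.0,0.0},{1.0,1.0,1.0});
//   Ghost<3,long int> g(1);
//   size_t sz[3] = {17,17,17};
//
//   sgrid_dist_amr<3,float,aggregate<float>> amr(domain,g);
//   amr.initLevels(3,sz);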
| 1038 | |
| 1039 | #ifdef __NVCC__ |
| 1040 | |
| 1041 | template<unsigned int dim, typename St, typename T, unsigned int blockEdgeSize = 8> |
| 1042 | using sgrid_dist_amr_gpu = grid_dist_amr<dim,St,T,AMR_IMPL_TRIVIAL,CartDecomposition<dim,St,CudaMemory,memory_traits_inte>,CudaMemory,SparseGridGpu<dim,T,blockEdgeSize,IntPow<blockEdgeSize,dim>::value >>; |
| 1043 | |
| 1044 | #endif |
| 1045 | |
| 1046 | #endif /* AMR_GRID_AMR_DIST_HPP_ */ |
| 1047 | |