#ifndef IFPACK2_BLOCKTRIDICONTAINER_IMPL_HPP
#define IFPACK2_BLOCKTRIDICONTAINER_IMPL_HPP

#include <Teuchos_Details_MpiTypeTraits.hpp>

#include <Tpetra_Details_extractMpiCommFromTeuchos.hpp>
#include <Tpetra_Distributor.hpp>
#include <Tpetra_BlockMultiVector.hpp>

#include <Kokkos_ArithTraits.hpp>
#include <KokkosBatched_Util.hpp>
#include <KokkosBatched_Vector.hpp>
#include <KokkosBatched_Copy_Decl.hpp>
#include <KokkosBatched_Copy_Impl.hpp>
#include <KokkosBatched_AddRadial_Decl.hpp>
#include <KokkosBatched_AddRadial_Impl.hpp>
#include <KokkosBatched_SetIdentity_Decl.hpp>
#include <KokkosBatched_SetIdentity_Impl.hpp>
#include <KokkosBatched_Gemm_Decl.hpp>
#include <KokkosBatched_Gemm_Serial_Impl.hpp>
#include <KokkosBatched_Gemm_Team_Impl.hpp>
#include <KokkosBatched_Gemv_Decl.hpp>
#include <KokkosBatched_Gemv_Serial_Impl.hpp>
#include <KokkosBatched_Gemv_Team_Impl.hpp>
#include <KokkosBatched_Trsm_Decl.hpp>
#include <KokkosBatched_Trsm_Serial_Impl.hpp>
#include <KokkosBatched_Trsm_Team_Impl.hpp>
#include <KokkosBatched_Trsv_Decl.hpp>
#include <KokkosBatched_Trsv_Serial_Impl.hpp>
#include <KokkosBatched_Trsv_Team_Impl.hpp>
#include <KokkosBatched_LU_Decl.hpp>
#include <KokkosBatched_LU_Serial_Impl.hpp>
#include <KokkosBatched_LU_Team_Impl.hpp>

#include <KokkosBlas1_nrm1.hpp>
#include <KokkosBlas1_nrm2.hpp>

#if defined(KOKKOS_ENABLE_CUDA) && defined(IFPACK2_BLOCKTRIDICONTAINER_ENABLE_PROFILE)
#include "cuda_profiler_api.h"
#endif

// use MPI 3 asynchronous communication primitives
#define IFPACK2_BLOCKTRIDICONTAINER_USE_MPI_3

// use pinned host memory for MPI data buffers
#define IFPACK2_BLOCKTRIDICONTAINER_USE_PINNED_MEMORY_FOR_MPI

// force the use of cuda space instead of uvm space
#define IFPACK2_BLOCKTRIDICONTAINER_USE_CUDA_SPACE

#if defined(HAVE_IFPACK2_BLOCKTRIDICONTAINER_SMALL_SCALAR)
#define IFPACK2_BLOCKTRIDICONTAINER_USE_SMALL_SCALAR_FOR_BLOCKTRIDIAG
#endif

// use cuda stream-backed execution space instances for communication overlap
#define IFPACK2_BLOCKTRIDICONTAINER_USE_EXEC_SPACE_INSTANCES

namespace Ifpack2 {

  namespace BlockTriDiContainerDetails {

    namespace KB = KokkosBatched;

    using do_not_initialize_tag = Kokkos::ViewAllocateWithoutInitializing;
    ///
    /// view decorators for unmanaged and const memory
    ///
    template <typename MemoryTraitsType, Kokkos::MemoryTraitsFlags flag>
    using MemoryTraits = Kokkos::MemoryTraits<MemoryTraitsType::is_unmanaged |
                                              MemoryTraitsType::is_random_access |
                                              flag>;

    template <typename ViewType>
    using Unmanaged = Kokkos::View<typename ViewType::data_type,
                                   typename ViewType::array_layout,
                                   typename ViewType::device_type,
                                   MemoryTraits<typename ViewType::memory_traits,Kokkos::Unmanaged> >;
    template <typename ViewType>
    using Atomic = Kokkos::View<typename ViewType::data_type,
                                typename ViewType::array_layout,
                                typename ViewType::device_type,
                                MemoryTraits<typename ViewType::memory_traits,Kokkos::Atomic> >;
    template <typename ViewType>
    using Const = Kokkos::View<typename ViewType::const_data_type,
                               typename ViewType::array_layout,
                               typename ViewType::device_type,
                               typename ViewType::memory_traits>;
    template <typename ViewType>
    using ConstUnmanaged = Const<Unmanaged<ViewType> >;

    template <typename ViewType>
    using AtomicUnmanaged = Atomic<Unmanaged<ViewType> >;
    template <typename ViewType>
    using Scratch = Kokkos::View<typename ViewType::data_type,
                                 typename ViewType::array_layout,
                                 typename ViewType::execution_space::scratch_memory_space,
                                 MemoryTraits<typename ViewType::memory_traits,Kokkos::Unmanaged> >;
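    // Usage sketch (illustrative): for a managed view type
    //   using view_type = Kokkos::View<double*>;
    //   view_type a("a", n);
    // the aliases above derive non-owning and read-only variants of it, e.g.
    //   Unmanaged<view_type> ua(a.data(), a.extent(0)); // no reference counting
    //   ConstUnmanaged<view_type> ca(ua);               // additionally read-only
    // while Scratch<view_type> maps the same data type onto team scratch
    // memory, which is how the per-team workspaces below are declared.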
    ///
    /// block tridiag scalar type: optionally use a lower-precision scalar
    /// for the tridiagonal factorization
    ///
#if defined(IFPACK2_BLOCKTRIDICONTAINER_USE_SMALL_SCALAR_FOR_BLOCKTRIDIAG)
    template<typename T> struct BlockTridiagScalarType { typedef float type; };
#else
    template<typename T> struct BlockTridiagScalarType { typedef T type; };
#endif

    ///
    /// compile-time device detection
    ///
    template<typename T> struct is_cuda { enum : bool { value = false }; };
#if defined(KOKKOS_ENABLE_CUDA)
    template<> struct is_cuda<Kokkos::Cuda> { enum : bool { value = true }; };
#endif

    template<typename T> struct is_hip { enum : bool { value = false }; };
#if defined(KOKKOS_ENABLE_HIP)
    template<> struct is_hip<Kokkos::Experimental::HIP> { enum : bool { value = true }; };
#endif
    ///
    /// execution space instance factory
    ///
    template<typename T>
    struct ExecutionSpaceFactory {
      static void createInstance(T &exec_instance) {
        exec_instance = T();
      }
#if defined(KOKKOS_ENABLE_CUDA)
      static void createInstance(const cudaStream_t &s, T &exec_instance) {
        exec_instance = T();
      }
#endif
    };

#if defined(KOKKOS_ENABLE_CUDA)
    template<>
    struct ExecutionSpaceFactory<Kokkos::Cuda> {
      static void createInstance(Kokkos::Cuda &exec_instance) {
        exec_instance = Kokkos::Cuda();
      }
      static void createInstance(const cudaStream_t &s, Kokkos::Cuda &exec_instance) {
        exec_instance = Kokkos::Cuda(s);
      }
    };
#endif

#if defined(KOKKOS_ENABLE_HIP)
    template<>
    struct ExecutionSpaceFactory<Kokkos::Experimental::HIP> {
      static void createInstance(Kokkos::Experimental::HIP &exec_instance) {
        exec_instance = Kokkos::Experimental::HIP();
      }
    };
#endif
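    // Sketch of how the factory is used below (assuming CUDA is enabled):
    //   cudaStream_t s;
    //   CUDA_SAFE_CALL(cudaStreamCreateWithFlags(&s, cudaStreamNonBlocking));
    //   Kokkos::Cuda exec_instance;
    //   ExecutionSpaceFactory<Kokkos::Cuda>::createInstance(s, exec_instance);
    // Kernels launched on exec_instance are then ordered only with respect to
    // the stream s, which is what AsyncableImport uses to overlap its
    // pack/unpack kernels with MPI communication.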
    ///
    /// utility functions
    ///
    template<typename CommPtrType>
    std::string get_msg_prefix (const CommPtrType &comm) {
      const auto rank = comm->getRank();
      const auto nranks = comm->getSize();
      std::stringstream ss;
      ss << "Rank " << rank << " of " << nranks << ": ";
      return ss.str();
    }
    ///
    /// a simple fixed-size array used as a multi-counter reduction value
    ///
    template<typename T, int N>
    struct ArrayValueType {
      T v[N];
      KOKKOS_INLINE_FUNCTION
      ArrayValueType() {
        for (int i=0;i<N;++i)
          this->v[i] = 0;
      }
      KOKKOS_INLINE_FUNCTION
      ArrayValueType(const ArrayValueType &b) {
        for (int i=0;i<N;++i)
          this->v[i] = b.v[i];
      }
    };
    template<typename T, int N>
    static
    KOKKOS_INLINE_FUNCTION
    void
    operator+=(volatile ArrayValueType<T,N> &a,
               volatile const ArrayValueType<T,N> &b) {
      for (int i=0;i<N;++i)
        a.v[i] += b.v[i];
    }
    template<typename T, int N>
    static
    KOKKOS_INLINE_FUNCTION
    void
    operator+=(ArrayValueType<T,N> &a,
               const ArrayValueType<T,N> &b) {
      for (int i=0;i<N;++i)
        a.v[i] += b.v[i];
    }
    ///
    /// custom multiple value reducer
    ///
    template<typename T, int N, typename ExecSpace>
    struct SumReducer {
      typedef SumReducer reducer;
      typedef ArrayValueType<T,N> value_type;
      typedef Kokkos::View<value_type,ExecSpace,Kokkos::MemoryTraits<Kokkos::Unmanaged> > result_view_type;
      value_type *value;
      KOKKOS_INLINE_FUNCTION
      SumReducer(value_type &val) : value(&val) {}
      KOKKOS_INLINE_FUNCTION
      void join(value_type &dst, value_type &src) const {
        for (int i=0;i<N;++i)
          dst.v[i] += src.v[i];
      }
      KOKKOS_INLINE_FUNCTION
      void join(volatile value_type &dst, const volatile value_type &src) const {
        for (int i=0;i<N;++i)
          dst.v[i] += src.v[i];
      }
      KOKKOS_INLINE_FUNCTION
      void init(value_type &val) const {
        for (int i=0;i<N;++i)
          val.v[i] = Kokkos::reduction_identity<T>::sum();
      }
      KOKKOS_INLINE_FUNCTION
      value_type& reference() {
        return *value;
      }
      KOKKOS_INLINE_FUNCTION
      result_view_type view() const {
        return result_view_type(value);
      }
    };
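    // Illustrative use of SumReducer as a custom Kokkos reducer that sums N
    // counters in a single pass (this is roughly how the symbolic phase below
    // counts D, owned-R, and remote-R entries at once):
    //   typedef SumReducer<size_t,3,Kokkos::DefaultHostExecutionSpace> reducer_type;
    //   reducer_type::value_type result;
    //   Kokkos::parallel_reduce
    //     (policy, KOKKOS_LAMBDA(const int i, reducer_type::value_type &update) {
    //       ++update.v[0]; // and/or v[1], v[2]
    //     }, reducer_type(result));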
#if defined(HAVE_IFPACK2_BLOCKTRIDICONTAINER_TIMERS)
#define IFPACK2_BLOCKTRIDICONTAINER_TIMER(label) TEUCHOS_FUNC_TIME_MONITOR(label);
#else
#define IFPACK2_BLOCKTRIDICONTAINER_TIMER(label)
#endif

#if defined(KOKKOS_ENABLE_CUDA) && defined(IFPACK2_BLOCKTRIDICONTAINER_ENABLE_PROFILE)
#define IFPACK2_BLOCKTRIDICONTAINER_PROFILER_REGION_BEGIN \
  CUDA_SAFE_CALL(cudaProfilerStart());
#define IFPACK2_BLOCKTRIDICONTAINER_PROFILER_REGION_END \
  { CUDA_SAFE_CALL( cudaProfilerStop() ); }
#else
#define IFPACK2_BLOCKTRIDICONTAINER_PROFILER_REGION_BEGIN
#define IFPACK2_BLOCKTRIDICONTAINER_PROFILER_REGION_END
#endif

    ///
    /// implementation typedefs
    ///
    template <typename MatrixType>
    struct ImplType {
    public:
      ///
      /// matrix type derived types
      ///
      typedef size_t size_type;
      typedef typename MatrixType::scalar_type scalar_type;
      typedef typename MatrixType::local_ordinal_type local_ordinal_type;
      typedef typename MatrixType::global_ordinal_type global_ordinal_type;
      typedef typename MatrixType::node_type node_type;

      typedef typename Kokkos::ArithTraits<scalar_type>::val_type impl_scalar_type;
      typedef typename Kokkos::ArithTraits<impl_scalar_type>::mag_type magnitude_type;

      typedef typename BlockTridiagScalarType<impl_scalar_type>::type btdm_scalar_type;
      typedef typename Kokkos::ArithTraits<btdm_scalar_type>::mag_type btdm_magnitude_type;

      ///
      /// default host execution space
      ///
      typedef Kokkos::DefaultHostExecutionSpace host_execution_space;

      ///
      /// device type and spaces
      ///
      typedef typename node_type::device_type node_device_type;
      typedef typename node_device_type::execution_space node_execution_space;
      typedef typename node_device_type::memory_space node_memory_space;

#if defined(KOKKOS_ENABLE_CUDA) && defined(IFPACK2_BLOCKTRIDICONTAINER_USE_CUDA_SPACE)
      /// force the use of cuda space instead of uvm space
      typedef node_execution_space execution_space;
      typedef typename std::conditional<std::is_same<node_memory_space,Kokkos::CudaUVMSpace>::value,
                                        Kokkos::CudaSpace,
                                        node_memory_space>::type memory_space;
      typedef Kokkos::Device<execution_space,memory_space> device_type;
#else
      typedef node_execution_space execution_space;
      typedef node_memory_space memory_space;
      typedef node_device_type device_type;
#endif

      ///
      /// tpetra types
      ///
      typedef Tpetra::MultiVector<scalar_type,local_ordinal_type,global_ordinal_type,node_type> tpetra_multivector_type;
      typedef Tpetra::Map<local_ordinal_type,global_ordinal_type,node_type> tpetra_map_type;
      typedef Tpetra::Import<local_ordinal_type,global_ordinal_type,node_type> tpetra_import_type;
      typedef Tpetra::RowMatrix<scalar_type,local_ordinal_type,global_ordinal_type,node_type> tpetra_row_matrix_type;
      typedef Tpetra::BlockCrsMatrix<scalar_type,local_ordinal_type,global_ordinal_type,node_type> tpetra_block_crs_matrix_type;
      typedef typename tpetra_block_crs_matrix_type::little_block_type tpetra_block_access_view_type;
      typedef Tpetra::BlockMultiVector<scalar_type,local_ordinal_type,global_ordinal_type,node_type> tpetra_block_multivector_type;
      typedef typename tpetra_block_crs_matrix_type::crs_graph_type::local_graph_device_type local_crs_graph_type;
      ///
      /// simd vectorization
      ///
      template<typename T, int l> using Vector = KB::Vector<T,l>;
      template<typename T> using SIMD = KB::SIMD<T>;
      template<typename T, typename M> using DefaultVectorLength = KB::DefaultVectorLength<T,M>;
      template<typename T, typename M> using DefaultInternalVectorLength = KB::DefaultInternalVectorLength<T,M>;

      static constexpr int vector_length = DefaultVectorLength<btdm_scalar_type,memory_space>::value;
      static constexpr int internal_vector_length = DefaultInternalVectorLength<btdm_scalar_type,memory_space>::value;
      typedef Vector<SIMD<btdm_scalar_type>,vector_length> vector_type;
      typedef Vector<SIMD<btdm_scalar_type>,internal_vector_length> internal_vector_type;

      ///
      /// commonly used view types
      ///
      typedef Kokkos::View<size_type*,device_type> size_type_1d_view;
      typedef Kokkos::View<local_ordinal_type*,device_type> local_ordinal_type_1d_view;
      // tpetra block crs values
      typedef Kokkos::View<impl_scalar_type*,device_type> impl_scalar_type_1d_view;
      typedef Kokkos::View<impl_scalar_type*,node_device_type> impl_scalar_type_1d_view_tpetra;
      // tpetra multivector values (layout left)
      typedef Kokkos::View<impl_scalar_type**,Kokkos::LayoutLeft,device_type> impl_scalar_type_2d_view;
      typedef Kokkos::View<impl_scalar_type**,Kokkos::LayoutLeft,node_device_type> impl_scalar_type_2d_view_tpetra;
      // packed data always uses layout right
      typedef Kokkos::View<vector_type*,device_type> vector_type_1d_view;
      typedef Kokkos::View<vector_type***,Kokkos::LayoutRight,device_type> vector_type_3d_view;
      typedef Kokkos::View<internal_vector_type***,Kokkos::LayoutRight,device_type> internal_vector_type_3d_view;
      typedef Kokkos::View<internal_vector_type****,Kokkos::LayoutRight,device_type> internal_vector_type_4d_view;
      typedef Kokkos::View<btdm_scalar_type***,Kokkos::LayoutRight,device_type> btdm_scalar_type_3d_view;
      typedef Kokkos::View<btdm_scalar_type****,Kokkos::LayoutRight,device_type> btdm_scalar_type_4d_view;
    };
    ///
    /// setup sequential importer
    ///
    template<typename MatrixType>
    typename Teuchos::RCP<const typename ImplType<MatrixType>::tpetra_import_type>
    createBlockCrsTpetraImporter(const Teuchos::RCP<const typename ImplType<MatrixType>::tpetra_block_crs_matrix_type> &A) {
      IFPACK2_BLOCKTRIDICONTAINER_TIMER("BlockTriDi::CreateBlockCrsTpetraImporter");
      using impl_type = ImplType<MatrixType>;
      using tpetra_map_type = typename impl_type::tpetra_map_type;
      using tpetra_mv_type = typename impl_type::tpetra_block_multivector_type;
      using tpetra_import_type = typename impl_type::tpetra_import_type;

      const auto g = A->getCrsGraph();  // tpetra crs graph object
      const auto blocksize = A->getBlockSize();
      const auto src = Teuchos::rcp(new tpetra_map_type(tpetra_mv_type::makePointMap(*g.getDomainMap(), blocksize)));
      const auto tgt = Teuchos::rcp(new tpetra_map_type(tpetra_mv_type::makePointMap(*g.getColMap() , blocksize)));

      return Teuchos::rcp(new tpetra_import_type(src, tgt));
    }
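    // Note: makePointMap expands a block map into the corresponding point map;
    // a block GID g with block size bs contributes the bs consecutive point
    // GIDs g*bs, ..., g*bs + bs - 1. The importer above therefore moves scalar
    // (point) data even though the matrix itself is stored block-wise.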
    ///
    /// asyncable import: custom import using raw MPI requests
    ///
    template<typename MatrixType>
    struct AsyncableImport {
    public:
      using impl_type = ImplType<MatrixType>;

    private:
      ///
      /// MPI wrapper
      ///
#if !defined(HAVE_IFPACK2_MPI)
      typedef int MPI_Request;
      typedef int MPI_Comm;
#endif

      using scalar_type = typename impl_type::scalar_type;

      static int isend(const MPI_Comm comm, const char* buf, int count, int dest, int tag, MPI_Request* ireq) {
#ifdef HAVE_IFPACK2_MPI
        MPI_Request ureq;
        int ret = MPI_Isend(const_cast<char*>(buf), count, MPI_CHAR, dest, tag, comm, ireq == NULL ? &ureq : ireq);
        if (ireq == NULL) MPI_Request_free(&ureq);
        return ret;
#else
        return 0;
#endif
      }

      static int irecv(const MPI_Comm comm, char* buf, int count, int src, int tag, MPI_Request* ireq) {
#ifdef HAVE_IFPACK2_MPI
        MPI_Request ureq;
        int ret = MPI_Irecv(buf, count, MPI_CHAR, src, tag, comm, ireq == NULL ? &ureq : ireq);
        if (ireq == NULL) MPI_Request_free(&ureq);
        return ret;
#else
        return 0;
#endif
      }

      static int waitany(int count, MPI_Request* reqs, int* index) {
#ifdef HAVE_IFPACK2_MPI
        return MPI_Waitany(count, reqs, index, MPI_STATUS_IGNORE);
#else
        return 0;
#endif
      }

      static int waitall(int count, MPI_Request* reqs) {
#ifdef HAVE_IFPACK2_MPI
        // note the plural MPI_STATUSES_IGNORE required by MPI_Waitall
        return MPI_Waitall(count, reqs, MPI_STATUSES_IGNORE);
#else
        return 0;
#endif
      }

    public:
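      // Note: when HAVE_IFPACK2_MPI is undefined the four wrappers above
      // compile to no-ops returning 0, so call sites need no guards of their
      // own. Counts are in bytes (MPI_CHAR); sending n scalars therefore uses
      // count = n*sizeof(impl_scalar_type), as done below.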
      using tpetra_map_type = typename impl_type::tpetra_map_type;
      using tpetra_import_type = typename impl_type::tpetra_import_type;

      using local_ordinal_type = typename impl_type::local_ordinal_type;
      using global_ordinal_type = typename impl_type::global_ordinal_type;
      using size_type = typename impl_type::size_type;
      using impl_scalar_type = typename impl_type::impl_scalar_type;

      using int_1d_view_host = Kokkos::View<int*,Kokkos::HostSpace>;
      using local_ordinal_type_1d_view_host = Kokkos::View<local_ordinal_type*,Kokkos::HostSpace>;

      using execution_space = typename impl_type::execution_space;
      using memory_space = typename impl_type::memory_space;
      using local_ordinal_type_1d_view = typename impl_type::local_ordinal_type_1d_view;
      using size_type_1d_view = typename impl_type::size_type_1d_view;
      using size_type_1d_view_host = Kokkos::View<size_type*,Kokkos::HostSpace>;

#if defined(KOKKOS_ENABLE_CUDA)
      using impl_scalar_type_1d_view =
        typename std::conditional<std::is_same<execution_space,Kokkos::Cuda>::value,
# if defined(IFPACK2_BLOCKTRIDICONTAINER_USE_PINNED_MEMORY_FOR_MPI)
                                  Kokkos::View<impl_scalar_type*,Kokkos::CudaHostPinnedSpace>,
# elif defined(IFPACK2_BLOCKTRIDICONTAINER_USE_CUDA_MEMORY_FOR_MPI)
                                  Kokkos::View<impl_scalar_type*,Kokkos::CudaSpace>,
# else // no experimental macros are defined
                                  typename impl_type::impl_scalar_type_1d_view,
# endif
                                  typename impl_type::impl_scalar_type_1d_view>::type;
#else
      using impl_scalar_type_1d_view = typename impl_type::impl_scalar_type_1d_view;
#endif
      using impl_scalar_type_2d_view = typename impl_type::impl_scalar_type_2d_view;
      using impl_scalar_type_2d_view_tpetra = typename impl_type::impl_scalar_type_2d_view_tpetra;
#ifdef HAVE_IFPACK2_MPI
      MPI_Comm comm;
#endif

      impl_scalar_type_2d_view_tpetra remote_multivector;
      local_ordinal_type blocksize;

      template<typename T>
      struct SendRecvPair {
        T send;
        T recv;
      };

      SendRecvPair<int_1d_view_host> pids;              // mpi ranks
      SendRecvPair<std::vector<MPI_Request> > reqs;     // MPI_Request cannot live in a kokkos view
      SendRecvPair<size_type_1d_view> offset;           // offsets into lid lists and buffers
      SendRecvPair<size_type_1d_view_host> offset_host; // host copy of the offsets
      SendRecvPair<local_ordinal_type_1d_view> lids;    // local id lists
      SendRecvPair<impl_scalar_type_1d_view> buffer;    // data buffers

      local_ordinal_type_1d_view dm2cm;                 // permutation, domain map to column map

#if defined(KOKKOS_ENABLE_CUDA)
      using cuda_stream_1d_std_vector = std::vector<cudaStream_t>;
      cuda_stream_1d_std_vector stream;

      using exec_instance_1d_std_vector = std::vector<execution_space>;
      exec_instance_1d_std_vector exec_instances;
#endif
      // setup phase: compute offsets from send/recv lengths by exclusive scan
      void setOffsetValues(const Teuchos::ArrayView<const size_t> &lens,
                           const size_type_1d_view &offs) {
        // wrap lens in a kokkos view and deep copy to device
        Kokkos::View<size_t*,Kokkos::HostSpace> lens_host(const_cast<size_t*>(lens.getRawPtr()), lens.size());
        const auto lens_device = Kokkos::create_mirror_view_and_copy(memory_space(), lens_host);

        // exclusive scan
        const Kokkos::RangePolicy<execution_space> policy(0,offs.extent(0));
        const local_ordinal_type lens_size = lens_device.extent(0);
        Kokkos::parallel_scan
          ("AsyncableImport::RangePolicy::setOffsetValues",
           policy, KOKKOS_LAMBDA(const local_ordinal_type &i, size_type &update, const bool &final) {
            if (final)
              offs(i) = update;
            update += (i < lens_size ? lens_device[i] : 0);
          });
      }
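      // Example: for lengths lens = {3, 1, 2} the exclusive scan above fills
      // offs = {0, 3, 4, 6}; offs(i) is where the i-th peer's data starts in
      // the buffer and the last entry is the total buffer length.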
      void setOffsetValuesHost(const Teuchos::ArrayView<const size_t> &lens,
                               const size_type_1d_view_host &offs) {
        // exclusive scan on host; lens can be read directly
        offs(0) = 0;
        for (local_ordinal_type i=1,iend=offs.extent(0);i<iend;++i) {
          offs(i) = offs(i-1) + lens[i-1];
        }
      }
      void createMpiRequests(const tpetra_import_type &import) {
        Tpetra::Distributor &distributor = import.getDistributor();

        // copy pids from the distributor
        const auto pids_from = distributor.getProcsFrom();
        pids.recv = int_1d_view_host(do_not_initialize_tag("pids recv"), pids_from.size());
        memcpy(pids.recv.data(), pids_from.getRawPtr(), sizeof(int)*pids.recv.extent(0));

        const auto pids_to = distributor.getProcsTo();
        pids.send = int_1d_view_host(do_not_initialize_tag("pids send"), pids_to.size());
        memcpy(pids.send.data(), pids_to.getRawPtr(), sizeof(int)*pids.send.extent(0));

        // mpi requests
        reqs.recv.resize(pids.recv.extent(0)); memset(reqs.recv.data(), 0, reqs.recv.size()*sizeof(MPI_Request));
        reqs.send.resize(pids.send.extent(0)); memset(reqs.send.data(), 0, reqs.send.size()*sizeof(MPI_Request));

        // construct offsets
#if 0
        // device-side exclusive scan, mirrored back to host
        const auto lengths_to = distributor.getLengthsTo();
        offset.send = size_type_1d_view(do_not_initialize_tag("offset send"), lengths_to.size() + 1);

        const auto lengths_from = distributor.getLengthsFrom();
        offset.recv = size_type_1d_view(do_not_initialize_tag("offset recv"), lengths_from.size() + 1);

        setOffsetValues(lengths_to, offset.send);
        offset_host.send = Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace(), offset.send);

        setOffsetValues(lengths_from, offset.recv);
        offset_host.recv = Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace(), offset.recv);
#else
        // host-side exclusive scan
        const auto lengths_to = distributor.getLengthsTo();
        offset_host.send = size_type_1d_view_host(do_not_initialize_tag("offset send"), lengths_to.size() + 1);

        const auto lengths_from = distributor.getLengthsFrom();
        offset_host.recv = size_type_1d_view_host(do_not_initialize_tag("offset recv"), lengths_from.size() + 1);

        setOffsetValuesHost(lengths_to, offset_host.send);
        setOffsetValuesHost(lengths_from, offset_host.recv);
#endif
      }
      void createSendRecvIDs(const tpetra_import_type &import) {
        // recv lids
        const auto remote_lids = import.getRemoteLIDs();
        const local_ordinal_type_1d_view_host
          remote_lids_view_host(const_cast<local_ordinal_type*>(remote_lids.getRawPtr()), remote_lids.size());
        lids.recv = local_ordinal_type_1d_view(do_not_initialize_tag("lids recv"), remote_lids.size());
        Kokkos::deep_copy(lids.recv, remote_lids_view_host);

        // send lids
        const auto epids = import.getExportPIDs();
        const auto elids = import.getExportLIDs();
        TEUCHOS_ASSERT(epids.size() == elids.size());
        lids.send = local_ordinal_type_1d_view(do_not_initialize_tag("lids send"), elids.size());
        auto lids_send_host = Kokkos::create_mirror_view(lids.send);

        // naive search and sort: group export lids by destination rank
        for (local_ordinal_type cnt=0,i=0,iend=pids.send.extent(0);i<iend;++i) {
          const auto pid_send_value = pids.send[i];
          for (local_ordinal_type j=0,jend=epids.size();j<jend;++j)
            if (epids[j] == pid_send_value) lids_send_host[cnt++] = elids[j];
#if !defined(__CUDA_ARCH__)
          TEUCHOS_ASSERT(static_cast<size_t>(cnt) == offset_host.send[i+1]);
#endif
        }
        Kokkos::deep_copy(lids.send, lids_send_host);
      }
      void createExecutionSpaceInstances() {
#if defined(KOKKOS_ENABLE_CUDA)
        const local_ordinal_type num_streams = 8;

        stream.resize(num_streams);
        exec_instances.clear();
        exec_instances.resize(num_streams);
        for (local_ordinal_type i=0;i<num_streams;++i) {
          CUDA_SAFE_CALL(cudaStreamCreateWithFlags(&stream[i], cudaStreamNonBlocking));
          ExecutionSpaceFactory<execution_space>::createInstance(stream[i], exec_instances[i]);
        }
#endif
      }

      void destroyExecutionSpaceInstances() {
#if defined(KOKKOS_ENABLE_CUDA)
        const local_ordinal_type num_streams = stream.size();
        for (local_ordinal_type i=0;i<num_streams;++i)
          CUDA_SAFE_CALL(cudaStreamDestroy(stream[i]));
        exec_instances.clear();
#endif
      }
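      // Note: the eight stream-backed instances form a small round-robin pool
      // (indexed i % 8 below), so up to eight pack/unpack kernels can be in
      // flight on distinct cuda streams at the same time.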
      // tags for the buffer copy kernels
      struct ToBuffer {};
      struct ToMultiVector {};

      AsyncableImport (const Teuchos::RCP<const tpetra_map_type>& src_map,
                       const Teuchos::RCP<const tpetra_map_type>& tgt_map,
                       const local_ordinal_type blocksize_,
                       const local_ordinal_type_1d_view dm2cm_) {
        blocksize = blocksize_;
        dm2cm = dm2cm_;

#ifdef HAVE_IFPACK2_MPI
        comm = Tpetra::Details::extractMpiCommFromTeuchos(*tgt_map->getComm());
#endif
        const tpetra_import_type import(src_map, tgt_map);

        createMpiRequests(import);
        createSendRecvIDs(import);
        createExecutionSpaceInstances();
      }

      ~AsyncableImport() {
        destroyExecutionSpaceInstances();
      }
      void createDataBuffer(const local_ordinal_type &num_vectors) {
        const size_type extent_0 = lids.recv.extent(0)*blocksize;
        const size_type extent_1 = num_vectors;
        if (remote_multivector.extent(0) == extent_0 &&
            remote_multivector.extent(1) == extent_1) {
          // reuse the existing buffers
        } else {
          remote_multivector =
            impl_scalar_type_2d_view_tpetra(do_not_initialize_tag("remote multivector"), extent_0, extent_1);

          const auto send_buffer_size = offset_host.send[offset_host.send.extent(0)-1]*blocksize*num_vectors;
          const auto recv_buffer_size = offset_host.recv[offset_host.recv.extent(0)-1]*blocksize*num_vectors;

          buffer.send = impl_scalar_type_1d_view(do_not_initialize_tag("buffer send"), send_buffer_size);
          buffer.recv = impl_scalar_type_1d_view(do_not_initialize_tag("buffer recv"), recv_buffer_size);
        }
      }

      void cancel () {
#ifdef HAVE_IFPACK2_MPI
        waitall(reqs.recv.size(), reqs.recv.data());
        waitall(reqs.send.size(), reqs.send.data());
#endif
      }
      ///
      /// cuda stream-based comm: overlap pack/unpack kernels with MPI
      ///
#if defined(KOKKOS_ENABLE_CUDA)
      template<typename PackTag>
      static
      void copy(const local_ordinal_type_1d_view &lids_,
                const impl_scalar_type_1d_view &buffer_,
                const local_ordinal_type ibeg_,
                const local_ordinal_type iend_,
                const impl_scalar_type_2d_view_tpetra &multivector_,
                const local_ordinal_type blocksize_,
                const execution_space &exec_instance_) {
        const local_ordinal_type num_vectors = multivector_.extent(1);
        const local_ordinal_type mv_blocksize = blocksize_*num_vectors;
        const local_ordinal_type idiff = iend_ - ibeg_;
        const auto abase = buffer_.data() + mv_blocksize*ibeg_;

        using team_policy_type = Kokkos::TeamPolicy<execution_space>;
        local_ordinal_type vector_size(0);
        if      (blocksize_ <=  4) vector_size =  4;
        else if (blocksize_ <=  8) vector_size =  8;
        else if (blocksize_ <= 16) vector_size = 16;
        else                       vector_size = 32;

        const auto work_item_property = Kokkos::Experimental::WorkItemProperty::HintLightWeight;
        const team_policy_type policy(exec_instance_, idiff, 1, vector_size);
        Kokkos::parallel_for
          ("AsyncableImport::TeamPolicy::copy",
           Kokkos::Experimental::require(policy, work_item_property),
           KOKKOS_LAMBDA(const typename team_policy_type::member_type &member) {
            const local_ordinal_type i = member.league_rank();
            Kokkos::parallel_for
              (Kokkos::TeamThreadRange(member,num_vectors),[&](const local_ordinal_type &j) {
                auto aptr = abase + blocksize_*(i + idiff*j);
                auto bptr = &multivector_(blocksize_*lids_(i + ibeg_), j);
                if (std::is_same<PackTag,ToBuffer>::value)
                  Kokkos::parallel_for
                    (Kokkos::ThreadVectorRange(member,blocksize_),[&](const local_ordinal_type &k) {
                      aptr[k] = bptr[k];
                    });
                else
                  Kokkos::parallel_for
                    (Kokkos::ThreadVectorRange(member,blocksize_),[&](const local_ordinal_type &k) {
                      bptr[k] = aptr[k];
                    });
              });
          });
      }
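      // Buffer layout used by copy<PackTag>: for block-row i of the id list
      // and vector j, the blocksize_ scalars live contiguously at
      // abase + blocksize_*(i + idiff*j). copy<ToBuffer> gathers multivector
      // rows into this layout and copy<ToMultiVector> scatters them back; the
      // two tags share one kernel body.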
      void asyncSendRecvVar1(const impl_scalar_type_2d_view_tpetra &mv) {
        IFPACK2_BLOCKTRIDICONTAINER_TIMER("BlockTriDi::AsyncableImport::AsyncSendRecv");

#ifdef HAVE_IFPACK2_MPI
        // constants
        const local_ordinal_type num_vectors = mv.extent(1);
        const local_ordinal_type mv_blocksize = blocksize*num_vectors;

        // 0. post receives async
        for (local_ordinal_type i=0,iend=pids.recv.extent(0);i<iend;++i) {
          irecv(comm,
                reinterpret_cast<char*>(buffer.recv.data() + offset_host.recv[i]*mv_blocksize),
                (offset_host.recv[i+1] - offset_host.recv[i])*mv_blocksize*sizeof(impl_scalar_type),
                pids.recv[i],
                42,
                &reqs.recv[i]);
        }

        execution_space().fence();

        // 1. enqueue pack kernels on the stream pool
        for (local_ordinal_type i=0;i<static_cast<local_ordinal_type>(pids.send.extent(0));++i) {
          if (i<8) exec_instances[i%8].fence();
          copy<ToBuffer>(lids.send, buffer.send,
                         offset_host.send(i), offset_host.send(i+1),
                         mv, blocksize,
                         exec_instances[i%8]);
        }

        // 2. send async once the corresponding pack stream has completed
        for (local_ordinal_type i=0;i<static_cast<local_ordinal_type>(pids.send.extent(0));++i) {
          if (i<8) exec_instances[i%8].fence();
          isend(comm,
                reinterpret_cast<const char*>(buffer.send.data() + offset_host.send[i]*mv_blocksize),
                (offset_host.send[i+1] - offset_host.send[i])*mv_blocksize*sizeof(impl_scalar_type),
                pids.send[i],
                42,
                &reqs.send[i]);
        }

        // 3. probe receives to nudge the MPI progress engine
        for (local_ordinal_type i=0,iend=pids.recv.extent(0);i<iend;++i) {
          int flag;
          MPI_Status stat;
          MPI_Iprobe(pids.recv[i], 42, comm, &flag, &stat);
        }
#endif // HAVE_IFPACK2_MPI
      }

      void syncRecvVar1() {
        IFPACK2_BLOCKTRIDICONTAINER_TIMER("BlockTriDi::AsyncableImport::SyncRecv");
#ifdef HAVE_IFPACK2_MPI
        // 0. wait for receives and unpack in arrival order
        for (local_ordinal_type i=0;i<static_cast<local_ordinal_type>(pids.recv.extent(0));++i) {
          local_ordinal_type idx = i;

          // 0.0. wait any
          waitany(pids.recv.extent(0), reqs.recv.data(), &idx);

          // 0.1. unpack the received block rows on the matching stream
          copy<ToMultiVector>(lids.recv, buffer.recv,
                              offset_host.recv(idx), offset_host.recv(idx+1),
                              remote_multivector, blocksize,
                              exec_instances[idx%8]);
        }

        // 1. wait for the unpack kernels
        Kokkos::fence();

        // 2. cleanup all open sends
        waitall(reqs.send.size(), reqs.send.data());
#endif // HAVE_IFPACK2_MPI
      }
#endif // defined(KOKKOS_ENABLE_CUDA)

      ///
      /// standard comm
      ///
      template<typename PackTag>
      static
      void copy(const local_ordinal_type_1d_view &lids_,
                const impl_scalar_type_1d_view &buffer_,
                const local_ordinal_type &ibeg_,
                const local_ordinal_type &iend_,
                const impl_scalar_type_2d_view_tpetra &multivector_,
                const local_ordinal_type blocksize_) {
        const local_ordinal_type num_vectors = multivector_.extent(1);
        const local_ordinal_type mv_blocksize = blocksize_*num_vectors;
        const local_ordinal_type idiff = iend_ - ibeg_;
        const auto abase = buffer_.data() + mv_blocksize*ibeg_;
        if (is_cuda<execution_space>::value || is_hip<execution_space>::value) {
#if defined(KOKKOS_ENABLE_CUDA) || defined(KOKKOS_ENABLE_HIP)
          using team_policy_type = Kokkos::TeamPolicy<execution_space>;
          local_ordinal_type vector_size(0);
          if      (blocksize_ <=  4) vector_size =  4;
          else if (blocksize_ <=  8) vector_size =  8;
          else if (blocksize_ <= 16) vector_size = 16;
          else                       vector_size = 32;
          const team_policy_type policy(idiff, 1, vector_size);
          Kokkos::parallel_for
            ("AsyncableImport::TeamPolicy::copy",
             policy, KOKKOS_LAMBDA(const typename team_policy_type::member_type &member) {
              const local_ordinal_type i = member.league_rank();
              Kokkos::parallel_for
                (Kokkos::TeamThreadRange(member,num_vectors),[&](const local_ordinal_type &j) {
                  auto aptr = abase + blocksize_*(i + idiff*j);
                  auto bptr = &multivector_(blocksize_*lids_(i + ibeg_), j);
                  if (std::is_same<PackTag,ToBuffer>::value)
                    Kokkos::parallel_for
                      (Kokkos::ThreadVectorRange(member,blocksize_),[&](const local_ordinal_type &k) {
                        aptr[k] = bptr[k];
                      });
                  else
                    Kokkos::parallel_for
                      (Kokkos::ThreadVectorRange(member,blocksize_),[&](const local_ordinal_type &k) {
                        bptr[k] = aptr[k];
                      });
                });
            });
#endif
        } else {
#if defined(__CUDA_ARCH__)
          TEUCHOS_TEST_FOR_EXCEPT_MSG(true, "Error: CUDA should not see this code");
#else
          const Kokkos::RangePolicy<execution_space> policy(0, idiff*num_vectors);
          Kokkos::parallel_for
            ("AsyncableImport::RangePolicy::copy",
             policy, KOKKOS_LAMBDA(const local_ordinal_type &ij) {
              const local_ordinal_type i = ij%idiff;
              const local_ordinal_type j = ij/idiff;
              auto aptr = abase + blocksize_*(i + idiff*j);
              auto bptr = &multivector_(blocksize_*lids_(i + ibeg_), j);
              auto from = std::is_same<PackTag,ToBuffer>::value ? bptr : aptr;
              auto to   = std::is_same<PackTag,ToBuffer>::value ? aptr : bptr;
              memcpy(to, from, sizeof(impl_scalar_type)*blocksize_);
            });
#endif
        }
      }
      void asyncSendRecvVar0(const impl_scalar_type_2d_view_tpetra &mv) {
        IFPACK2_BLOCKTRIDICONTAINER_TIMER("BlockTriDi::AsyncableImport::AsyncSendRecv");

#ifdef HAVE_IFPACK2_MPI
        // constants
        const local_ordinal_type num_vectors = mv.extent(1);
        const local_ordinal_type mv_blocksize = blocksize*num_vectors;

        // receive async
        for (local_ordinal_type i=0,iend=pids.recv.extent(0);i<iend;++i) {
          irecv(comm,
                reinterpret_cast<char*>(buffer.recv.data() + offset_host.recv[i]*mv_blocksize),
                (offset_host.recv[i+1] - offset_host.recv[i])*mv_blocksize*sizeof(impl_scalar_type),
                pids.recv[i],
                42,
                &reqs.recv[i]);
        }

        // send async
        for (local_ordinal_type i=0,iend=pids.send.extent(0);i<iend;++i) {
          copy<ToBuffer>(lids.send, buffer.send, offset_host.send(i), offset_host.send(i+1),
                         mv, blocksize);
          Kokkos::fence();
          isend(comm,
                reinterpret_cast<const char*>(buffer.send.data() + offset_host.send[i]*mv_blocksize),
                (offset_host.send[i+1] - offset_host.send[i])*mv_blocksize*sizeof(impl_scalar_type),
                pids.send[i],
                42,
                &reqs.send[i]);
        }

        // probe receives to nudge the MPI progress engine
        for (local_ordinal_type i=0,iend=pids.recv.extent(0);i<iend;++i) {
          int flag;
          MPI_Status stat;
          MPI_Iprobe(pids.recv[i], 42, comm, &flag, &stat);
        }
#endif
      }
      void syncRecvVar0() {
        IFPACK2_BLOCKTRIDICONTAINER_TIMER("BlockTriDi::AsyncableImport::SyncRecv");
#ifdef HAVE_IFPACK2_MPI
        // receive and unpack in arrival order
        for (local_ordinal_type i=0,iend=pids.recv.extent(0);i<iend;++i) {
          local_ordinal_type idx = i;
          waitany(pids.recv.extent(0), reqs.recv.data(), &idx);
          copy<ToMultiVector>(lids.recv, buffer.recv, offset_host.recv(idx), offset_host.recv(idx+1),
                              remote_multivector, blocksize);
        }
        // wait on the sends to match all Isends with a cleanup operation
        waitall(reqs.send.size(), reqs.send.data());
#endif
      }
      ///
      /// front interface
      ///
      void asyncSendRecv(const impl_scalar_type_2d_view_tpetra &mv) {
#if defined(KOKKOS_ENABLE_CUDA)
#if defined(IFPACK2_BLOCKTRIDICONTAINER_USE_EXEC_SPACE_INSTANCES)
        asyncSendRecvVar1(mv);
#else
        asyncSendRecvVar0(mv);
#endif
#else
        asyncSendRecvVar0(mv);
#endif
      }

      void syncRecv() {
#if defined(KOKKOS_ENABLE_CUDA)
#if defined(IFPACK2_BLOCKTRIDICONTAINER_USE_EXEC_SPACE_INSTANCES)
        syncRecvVar1();
#else
        syncRecvVar0();
#endif
#else
        syncRecvVar0();
#endif
      }

      void syncExchange(const impl_scalar_type_2d_view_tpetra &mv) {
        IFPACK2_BLOCKTRIDICONTAINER_TIMER("BlockTriDi::AsyncableImport::SyncExchange");
        asyncSendRecv(mv);
        syncRecv();
      }

      impl_scalar_type_2d_view_tpetra getRemoteMultiVectorLocalView() const { return remote_multivector; }
    };
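    // Typical call sequence for AsyncableImport (a sketch of the intended
    // usage, matching the members defined above):
    //   importer->createDataBuffer(num_vectors); // allocate or reuse buffers
    //   importer->asyncSendRecv(mv);             // post Irecv, pack, Isend
    //   /* ... compute on owned rows while messages are in flight ... */
    //   importer->syncRecv();                    // waitany + unpack
    //   auto remote = importer->getRemoteMultiVectorLocalView();
    // syncExchange(mv) runs the two halves back-to-back when no overlap of
    // communication and computation is requested.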
    ///
    /// setup async importer
    ///
    template<typename MatrixType>
    Teuchos::RCP<AsyncableImport<MatrixType> >
    createBlockCrsAsyncImporter(const Teuchos::RCP<const typename ImplType<MatrixType>::tpetra_block_crs_matrix_type> &A) {
      using impl_type = ImplType<MatrixType>;
      using tpetra_map_type = typename impl_type::tpetra_map_type;
      using local_ordinal_type = typename impl_type::local_ordinal_type;
      using global_ordinal_type = typename impl_type::global_ordinal_type;
      using local_ordinal_type_1d_view = typename impl_type::local_ordinal_type_1d_view;

      const auto g = A->getCrsGraph();  // tpetra crs graph object
      const auto blocksize = A->getBlockSize();
      const auto domain_map = g.getDomainMap();
      const auto column_map = g.getColMap();

      std::vector<global_ordinal_type> gids;
      bool separate_remotes = true, found_first = false, need_owned_permutation = false;
      for (size_t i=0;i<column_map->getNodeNumElements();++i) {
        const global_ordinal_type gid = column_map->getGlobalElement(i);
        if (!domain_map->isNodeGlobalElement(gid)) {
          found_first = true;
          gids.push_back(gid);
        } else if (found_first) {
          // a remote gid was followed by an owned one: remotes are not
          // separated at the end of the column map.
          separate_remotes = false;
          break;
        }
        if (!need_owned_permutation &&
            domain_map->getLocalElement(gid) != static_cast<local_ordinal_type>(i)) {
          // The owned parts of the domain and column maps are ordered
          // differently, so an owned permutation (dm2cm) is needed.
          need_owned_permutation = true;
        }
      }

      if (separate_remotes) {
        const auto invalid = Teuchos::OrdinalTraits<global_ordinal_type>::invalid();
        const auto parsimonious_col_map
          = Teuchos::rcp(new tpetra_map_type(invalid, gids.data(), gids.size(), 0, domain_map->getComm()));
        if (parsimonious_col_map->getGlobalNumElements() > 0) {
          // make the importer only if needed.
          local_ordinal_type_1d_view dm2cm;
          if (need_owned_permutation) {
            dm2cm = local_ordinal_type_1d_view(do_not_initialize_tag("dm2cm"), domain_map->getNodeNumElements());
            const auto dm2cm_host = Kokkos::create_mirror_view(dm2cm);
            for (size_t i=0;i<domain_map->getNodeNumElements();++i)
              dm2cm_host(i) = domain_map->getLocalElement(column_map->getGlobalElement(i));
            Kokkos::deep_copy(dm2cm, dm2cm_host);
          }
          return Teuchos::rcp(new AsyncableImport<MatrixType>(domain_map, parsimonious_col_map, blocksize, dm2cm));
        }
      }
      return Teuchos::null;
    }
    ///
    /// part interface
    ///
    template<typename MatrixType>
    struct PartInterface {
      using local_ordinal_type = typename ImplType<MatrixType>::local_ordinal_type;
      using local_ordinal_type_1d_view = typename ImplType<MatrixType>::local_ordinal_type_1d_view;

      PartInterface() = default;
      PartInterface(const PartInterface &b) = default;

      // Local row LIDs; a permutation from the caller's index space to the
      // tridiag index space.
      local_ordinal_type_1d_view lclrow;
      // partptr(i) points into lclrow to the start of part i (nparts+1 entries).
      local_ordinal_type_1d_view partptr;
      // packptr(i), for i the pack index, indexes partptr (npacks+1 entries).
      local_ordinal_type_1d_view packptr;
      // part2rowidx0(i) is the flat row index of the start of part i.
      local_ordinal_type_1d_view part2rowidx0;
      // part2packrowidx0(i) is the packed row index of the start of part i;
      // it advances only at pack boundaries.
      local_ordinal_type_1d_view part2packrowidx0;
      local_ordinal_type part2packrowidx0_back;
      // rowidx2part(i) maps the flat row index back to its part.
      local_ordinal_type_1d_view rowidx2part;
      // true if lclrow is the identity permutation.
      bool row_contiguous;

      local_ordinal_type max_partsz;
    };
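    // Example: for partitions {{0,1,2},{3,4}} on a 5-row matrix,
    // createPartInterface below produces (up to the sort on part sizes)
    //   partptr     = {0, 3, 5}  // rows of part i: lclrow(partptr(i) : partptr(i+1))
    //   lclrow      = {0, 1, 2, 3, 4}
    //   rowidx2part = {0, 0, 0, 1, 1}
    // and row_contiguous = true, since lclrow is the identity permutation.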
    ///
    /// setup part interface using the container partitions array
    ///
    template<typename MatrixType>
    PartInterface<MatrixType>
    createPartInterface(const Teuchos::RCP<const typename ImplType<MatrixType>::tpetra_block_crs_matrix_type> &A,
                        const Teuchos::Array<Teuchos::Array<typename ImplType<MatrixType>::local_ordinal_type> > &partitions) {
      using impl_type = ImplType<MatrixType>;
      using local_ordinal_type = typename impl_type::local_ordinal_type;
      using local_ordinal_type_1d_view = typename impl_type::local_ordinal_type_1d_view;

      constexpr int vector_length = impl_type::vector_length;

      const auto comm = A->getRowMap()->getComm();

      PartInterface<MatrixType> interf;

      const bool jacobi = partitions.size() == 0;
      const local_ordinal_type A_n_lclrows = A->getNodeNumRows();
      const local_ordinal_type nparts = jacobi ? A_n_lclrows : partitions.size();

#if defined(BLOCKTRIDICONTAINER_DEBUG)
      local_ordinal_type nrows = 0;
      if (jacobi)
        nrows = nparts;
      else
        for (local_ordinal_type i=0;i<nparts;++i) nrows += partitions[i].size();

      TEUCHOS_TEST_FOR_EXCEPT_MSG
        (nrows != A_n_lclrows, get_msg_prefix(comm) << "The #rows implied by the local partition is not "
         << "the same as getNodeNumRows: " << nrows << " vs " << A_n_lclrows);
#endif

      // permutation of parts: sort parts by decreasing size for simd packing
      std::vector<local_ordinal_type> p;
      if (jacobi) {
        interf.max_partsz = 1;
      } else {
        p.resize(nparts);

        typedef std::pair<local_ordinal_type,local_ordinal_type> size_idx_pair_type;
        std::vector<size_idx_pair_type> partsz(nparts);
        for (local_ordinal_type i=0;i<nparts;++i)
          partsz[i] = size_idx_pair_type(partitions[i].size(), i);
        std::sort(partsz.begin(), partsz.end(),
                  [] (const size_idx_pair_type& x, const size_idx_pair_type& y) {
                    return x.first > y.first;
                  });
        for (local_ordinal_type i=0;i<nparts;++i)
          p[i] = partsz[i].second;

        interf.max_partsz = partsz[0].first;
      }
      // allocate the interface arrays
      interf.partptr = local_ordinal_type_1d_view(do_not_initialize_tag("partptr"), nparts + 1);
      interf.lclrow = local_ordinal_type_1d_view(do_not_initialize_tag("lclrow"), A_n_lclrows);
      interf.part2rowidx0 = local_ordinal_type_1d_view(do_not_initialize_tag("part2rowidx0"), nparts + 1);
      interf.part2packrowidx0 = local_ordinal_type_1d_view(do_not_initialize_tag("part2packrowidx0"), nparts + 1);
      interf.rowidx2part = local_ordinal_type_1d_view(do_not_initialize_tag("rowidx2part"), A_n_lclrows);

      // mirror to host and compute there
      const auto partptr = Kokkos::create_mirror_view(interf.partptr);
      const auto lclrow = Kokkos::create_mirror_view(interf.lclrow);
      const auto part2rowidx0 = Kokkos::create_mirror_view(interf.part2rowidx0);
      const auto part2packrowidx0 = Kokkos::create_mirror_view(interf.part2packrowidx0);
      const auto rowidx2part = Kokkos::create_mirror_view(interf.rowidx2part);

      // determine parts
      interf.row_contiguous = true;
      partptr(0) = 0;
      part2rowidx0(0) = 0;
      part2packrowidx0(0) = 0;
      local_ordinal_type pack_nrows = 0;
      if (jacobi) {
        // trivial partitioning: one row per part
        for (local_ordinal_type ip=0;ip<nparts;++ip) {
          const local_ordinal_type ipnrows = 1;
          TEUCHOS_TEST_FOR_EXCEPT_MSG(ipnrows == 0,
                                      get_msg_prefix(comm)
                                      << "partition " << p[ip]
                                      << " is empty, which is not allowed.");
          //assume No overlap.
          part2rowidx0(ip+1) = part2rowidx0(ip) + ipnrows;
          // Since parts are ordered in decreasing size, the size of the first
          // part in a pack is the size for all parts in the pack.
          if (ip % vector_length == 0) pack_nrows = ipnrows;
          part2packrowidx0(ip+1) = part2packrowidx0(ip) + ((ip+1) % vector_length == 0 || ip+1 == nparts ? pack_nrows : 0);
          const local_ordinal_type os = partptr(ip);
          for (local_ordinal_type i=0;i<ipnrows;++i) {
            const auto lcl_row = ip;
            TEUCHOS_TEST_FOR_EXCEPT_MSG(lcl_row < 0 || lcl_row >= A_n_lclrows,
                                        get_msg_prefix(comm)
                                        << "partitions[" << p[ip] << "]["
                                        << i << "] = " << lcl_row
                                        << " but input matrix implies limits of [0, " << A_n_lclrows-1
                                        << "].");
            lclrow(os+i) = lcl_row;
            rowidx2part(os+i) = ip;
            if (interf.row_contiguous && os+i > 0 && lclrow((os+i)-1) + 1 != lcl_row)
              interf.row_contiguous = false;
          }
          partptr(ip+1) = os + ipnrows;
        }
      } else {
        for (local_ordinal_type ip=0;ip<nparts;++ip) {
          const auto* part = &partitions[p[ip]];
          const local_ordinal_type ipnrows = part->size();
          TEUCHOS_ASSERT(ip == 0 || (ipnrows <= static_cast<local_ordinal_type>(partitions[p[ip-1]].size())));
          TEUCHOS_TEST_FOR_EXCEPT_MSG(ipnrows == 0,
                                      get_msg_prefix(comm)
                                      << "partition " << p[ip]
                                      << " is empty, which is not allowed.");
          //assume No overlap.
          part2rowidx0(ip+1) = part2rowidx0(ip) + ipnrows;
          // Since parts are ordered in decreasing size, the size of the first
          // part in a pack is the size for all parts in the pack.
          if (ip % vector_length == 0) pack_nrows = ipnrows;
          part2packrowidx0(ip+1) = part2packrowidx0(ip) + ((ip+1) % vector_length == 0 || ip+1 == nparts ? pack_nrows : 0);
          const local_ordinal_type os = partptr(ip);
          for (local_ordinal_type i=0;i<ipnrows;++i) {
            const auto lcl_row = (*part)[i];
            TEUCHOS_TEST_FOR_EXCEPT_MSG(lcl_row < 0 || lcl_row >= A_n_lclrows,
                                        get_msg_prefix(comm)
                                        << "partitions[" << p[ip] << "]["
                                        << i << "] = " << lcl_row
                                        << " but input matrix implies limits of [0, " << A_n_lclrows-1
                                        << "].");
            lclrow(os+i) = lcl_row;
            rowidx2part(os+i) = ip;
            if (interf.row_contiguous && os+i > 0 && lclrow((os+i)-1) + 1 != lcl_row)
              interf.row_contiguous = false;
          }
          partptr(ip+1) = os + ipnrows;
        }
      }
#if defined(BLOCKTRIDICONTAINER_DEBUG)
      TEUCHOS_ASSERT(partptr(nparts) == nrows);
#endif
      if (lclrow(0) != 0) interf.row_contiguous = false;

      Kokkos::deep_copy(interf.partptr, partptr);
      Kokkos::deep_copy(interf.lclrow, lclrow);

      //assume No overlap. Thus:
      interf.part2rowidx0 = interf.partptr;
      Kokkos::deep_copy(interf.part2packrowidx0, part2packrowidx0);

      interf.part2packrowidx0_back = part2packrowidx0(part2packrowidx0.extent(0) - 1);
      Kokkos::deep_copy(interf.rowidx2part, rowidx2part);

      { // Fill packptr.
        local_ordinal_type npacks = 0;
        for (local_ordinal_type ip=1;ip<=nparts;++ip)
          if (part2packrowidx0(ip) != part2packrowidx0(ip-1))
            ++npacks;

        interf.packptr = local_ordinal_type_1d_view(do_not_initialize_tag("packptr"), npacks + 1);
        const auto packptr = Kokkos::create_mirror_view(interf.packptr);
        packptr(0) = 0;
        for (local_ordinal_type ip=1,k=1;ip<=nparts;++ip)
          if (part2packrowidx0(ip) != part2packrowidx0(ip-1))
            packptr(k++) = ip;
        Kokkos::deep_copy(interf.packptr, packptr);
      }

      return interf;
    }
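    // Example of packing: with vector_length = 4, the parts (sorted by
    // decreasing size) are grouped four at a time into SIMD packs, so six
    // equally sized parts yield packptr = {0, 4, 6}: pack 0 holds parts 0-3
    // and pack 1 holds parts 4-5. part2packrowidx0 advances only at pack
    // boundaries, by the row count of the largest (first) part in each pack.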
    ///
    /// block tridiagonals
    ///
    template <typename MatrixType>
    struct BlockTridiags {
      using impl_type = ImplType<MatrixType>;
      using local_ordinal_type_1d_view = typename impl_type::local_ordinal_type_1d_view;
      using size_type_1d_view = typename impl_type::size_type_1d_view;
      using vector_type_3d_view = typename impl_type::vector_type_3d_view;

      // flat_td_ptr(i) is the index into the flat block array of the start of
      // the i'th tridiag; pack_td_ptr is the same but for packs.
      size_type_1d_view flat_td_ptr, pack_td_ptr;

      // List of local column indices into A from which to grab data;
      // flat_td_ptr(i) points to the start of the i'th tridiag's data.
      local_ordinal_type_1d_view A_colindsub;

      // Tridiag block values; pack_td_ptr(i) points to the start of the i'th
      // tridiag's pack.
      vector_type_3d_view values;

      bool is_diagonal_only;

      BlockTridiags() = default;
      BlockTridiags(const BlockTridiags &b) = default;

      // Given a block index in the tridiag, return its block row.
      template <typename idx_type>
      static KOKKOS_FORCEINLINE_FUNCTION
      idx_type IndexToRow (const idx_type& ind) { return (ind + 1) / 3; }
      // Given a block row, return the index of its first block.
      template <typename idx_type>
      static KOKKOS_FORCEINLINE_FUNCTION
      idx_type RowToIndex (const idx_type& row) { return row > 0 ? 3*row - 1 : 0; }
      // Number of blocks in a tridiag with nrows block rows.
      template <typename idx_type>
      static KOKKOS_FORCEINLINE_FUNCTION
      idx_type NumBlocks (const idx_type& nrows) { return nrows > 0 ? 3*nrows - 2 : 0; }
    };
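    // Worked example of the indexing helpers: a tridiag with nrows = 4 stores
    // NumBlocks(4) = 10 blocks; row r owns indices RowToIndex(r) through
    // RowToIndex(r+1)-1:
    //   row 0: indices 0,1      (RowToIndex(0) = 0)
    //   row 1: indices 2,3,4    (RowToIndex(1) = 2)
    //   row 2: indices 5,6,7    (RowToIndex(2) = 5)
    //   row 3: indices 8,9      (RowToIndex(3) = 8)
    // IndexToRow inverts this, e.g. IndexToRow(4) = (4+1)/3 = 1.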
    ///
    /// block tridiags initialization from the part interface
    ///
    template<typename MatrixType>
    BlockTridiags<MatrixType>
    createBlockTridiags(const PartInterface<MatrixType> &interf) {
      using impl_type = ImplType<MatrixType>;
      using execution_space = typename impl_type::execution_space;
      using local_ordinal_type = typename impl_type::local_ordinal_type;
      using size_type = typename impl_type::size_type;
      using size_type_1d_view = typename impl_type::size_type_1d_view;

      constexpr int vector_length = impl_type::vector_length;

      BlockTridiags<MatrixType> btdm;

      const local_ordinal_type ntridiags = interf.partptr.extent(0) - 1;

      { // construct the flat block pointers
        btdm.flat_td_ptr = size_type_1d_view(do_not_initialize_tag("btdm.flat_td_ptr"), ntridiags + 1);
        const Kokkos::RangePolicy<execution_space> policy(0,ntridiags + 1);
        Kokkos::parallel_scan
          ("createBlockTridiags::RangePolicy::flat_td_ptr",
           policy, KOKKOS_LAMBDA(const local_ordinal_type &i, size_type &update, const bool &final) {
            if (final)
              btdm.flat_td_ptr(i) = update;
            if (i < ntridiags) {
              const local_ordinal_type nrows = interf.partptr(i+1) - interf.partptr(i);
              update += btdm.NumBlocks(nrows);
            }
          });

        const auto nblocks = Kokkos::create_mirror_view_and_copy
          (Kokkos::HostSpace(), Kokkos::subview(btdm.flat_td_ptr, ntridiags));
        btdm.is_diagonal_only = (static_cast<local_ordinal_type>(nblocks()) == ntridiags);
      }

      // construct the packed block pointers
      if (vector_length == 1) {
        btdm.pack_td_ptr = btdm.flat_td_ptr;
      } else {
        const local_ordinal_type npacks = interf.packptr.extent(0) - 1;
        btdm.pack_td_ptr = size_type_1d_view(do_not_initialize_tag("btdm.pack_td_ptr"), ntridiags + 1);
        const Kokkos::RangePolicy<execution_space> policy(0,npacks);
        Kokkos::parallel_scan
          ("createBlockTridiags::RangePolicy::pack_td_ptr",
           policy, KOKKOS_LAMBDA(const local_ordinal_type &i, size_type &update, const bool &final) {
            const local_ordinal_type parti = interf.packptr(i);
            const local_ordinal_type parti_next = interf.packptr(i+1);
            if (final) {
              const size_type nblks = update;
              for (local_ordinal_type pti=parti;pti<parti_next;++pti)
                btdm.pack_td_ptr(pti) = nblks;
              if (i == npacks-1) {
                // the last entry is the total number of packed blocks
                const local_ordinal_type nrows = interf.partptr(parti+1) - interf.partptr(parti);
                btdm.pack_td_ptr(ntridiags) = nblks + btdm.NumBlocks(nrows);
              }
            }
            {
              // a pack stores as many blocks as its first (largest) part
              const local_ordinal_type nrows = interf.partptr(parti+1) - interf.partptr(parti);
              update += btdm.NumBlocks(nrows);
            }
          });
      }

      return btdm;
    }
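    // Example: two parts of 3 rows each give NumBlocks(3) = 7 blocks per
    // tridiag, so flat_td_ptr = {0, 7, 14}. With vector_length > 1 the two
    // parts share one pack and pack_td_ptr = {0, 0, 7}: both parts point at
    // the same 7 packed blocks, with the SIMD lanes distinguishing the parts.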
    ///
    /// set the tridiag block matrices to identity
    ///
    template<typename MatrixType>
    void
    setTridiagsToIdentity
    (const BlockTridiags<MatrixType>& btdm,
     const typename ImplType<MatrixType>::local_ordinal_type_1d_view& packptr)
    {
      using impl_type = ImplType<MatrixType>;
      using execution_space = typename impl_type::execution_space;
      using local_ordinal_type = typename impl_type::local_ordinal_type;
      using size_type_1d_view = typename impl_type::size_type_1d_view;

      const ConstUnmanaged<size_type_1d_view> pack_td_ptr(btdm.pack_td_ptr);
      const local_ordinal_type blocksize = btdm.values.extent(1);

      {
        const int vector_length = impl_type::vector_length;
        const int internal_vector_length = impl_type::internal_vector_length;

        using btdm_scalar_type = typename impl_type::btdm_scalar_type;
        using internal_vector_type = typename impl_type::internal_vector_type;
        using internal_vector_type_4d_view =
          typename impl_type::internal_vector_type_4d_view;

        using team_policy_type = Kokkos::TeamPolicy<execution_space>;
        const internal_vector_type_4d_view values
          (reinterpret_cast<internal_vector_type*>(btdm.values.data()),
           btdm.values.extent(0),
           btdm.values.extent(1),
           btdm.values.extent(2),
           vector_length/internal_vector_length);
        const local_ordinal_type vector_loop_size = values.extent(3);
#if defined(KOKKOS_ENABLE_CUDA) && defined(__CUDA_ARCH__)
        local_ordinal_type total_team_size(0);
        if      (blocksize <=  5) total_team_size =  32;
        else if (blocksize <=  9) total_team_size =  64;
        else if (blocksize <= 12) total_team_size =  96;
        else if (blocksize <= 16) total_team_size = 128;
        else if (blocksize <= 20) total_team_size = 160;
        else                      total_team_size = 160;
        const local_ordinal_type team_size = total_team_size/vector_loop_size;
        const team_policy_type policy(packptr.extent(0)-1, team_size, vector_loop_size);
#elif defined(KOKKOS_ENABLE_HIP)
        // FIXME: HIP: the team size table is copied from CUDA; find optimal values
        local_ordinal_type total_team_size(0);
        if      (blocksize <=  5) total_team_size =  32;
        else if (blocksize <=  9) total_team_size =  64;
        else if (blocksize <= 12) total_team_size =  96;
        else if (blocksize <= 16) total_team_size = 128;
        else if (blocksize <= 20) total_team_size = 160;
        else                      total_team_size = 160;
        const local_ordinal_type team_size = total_team_size/vector_loop_size;
        const team_policy_type policy(packptr.extent(0)-1, team_size, vector_loop_size);
#else // Host architecture: team size is always one
        const team_policy_type policy(packptr.extent(0)-1, 1, 1);
#endif
        Kokkos::parallel_for
          ("setTridiagsToIdentity::TeamPolicy",
           policy, KOKKOS_LAMBDA(const typename team_policy_type::member_type &member) {
            const local_ordinal_type k = member.league_rank();
            const local_ordinal_type ibeg = pack_td_ptr(packptr(k));
            const local_ordinal_type iend = pack_td_ptr(packptr(k+1));
            const local_ordinal_type diff = iend - ibeg;
            const local_ordinal_type icount = diff/3 + (diff%3 > 0);
            const btdm_scalar_type one(1);
            Kokkos::parallel_for(Kokkos::ThreadVectorRange(member, vector_loop_size),[&](const int &v) {
              Kokkos::parallel_for(Kokkos::TeamThreadRange(member,icount),[&](const local_ordinal_type &ii) {
                const local_ordinal_type i = ibeg + ii*3;
                for (local_ordinal_type j=0;j<blocksize;++j)
                  values(i,j,j,v) = one;
              });
            });
          });
      }
    }
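    // Note: the packed blocks repeat in the cycle (diagonal, super-diagonal,
    // sub-diagonal of the next row), so stepping by 3 from the first block of
    // a pack (i = ibeg + ii*3) visits exactly the diagonal blocks. Writing 1
    // on their diagonals ensures unused SIMD lanes of a partially filled pack
    // still factor as the identity.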
    ///
    /// A - D = R
    ///
    template <typename MatrixType>
    struct AmD {
      using impl_type = ImplType<MatrixType>;
      using local_ordinal_type_1d_view = typename impl_type::local_ordinal_type_1d_view;
      using size_type_1d_view = typename impl_type::size_type_1d_view;
      using impl_scalar_type_1d_view_tpetra = Unmanaged<typename impl_type::impl_scalar_type_1d_view_tpetra>;

      // rowptr points to the start of each row of A_colindsub.
      size_type_1d_view rowptr, rowptr_remote;
      // Indices into A's rows giving the blocks to extract; A_colindsub holds
      // the owned entries and A_colindsub_remote the remote ones when
      // communication and computation overlap.
      local_ordinal_type_1d_view A_colindsub, A_colindsub_remote;

      // Currently always true.
      bool is_tpetra_block_crs;

      // If is_tpetra_block_crs, this aliases A's value data.
      impl_scalar_type_1d_view_tpetra tpetra_values;

      AmD() = default;
      AmD(const AmD &b) = default;
    };
    ///
    /// symbolic phase, on host: create R = A - D, set up the packing of D
    ///
    template<typename MatrixType>
    void
    performSymbolicPhase(const Teuchos::RCP<const typename ImplType<MatrixType>::tpetra_block_crs_matrix_type> &A,
                         const PartInterface<MatrixType> &interf,
                         BlockTridiags<MatrixType> &btdm,
                         AmD<MatrixType> &amd,
                         const bool overlap_communication_and_computation) {
      IFPACK2_BLOCKTRIDICONTAINER_TIMER("BlockTriDi::SymbolicPhase");

      using impl_type = ImplType<MatrixType>;
      using node_memory_space = typename impl_type::node_memory_space;
      using host_execution_space = typename impl_type::host_execution_space;

      using local_ordinal_type = typename impl_type::local_ordinal_type;
      using global_ordinal_type = typename impl_type::global_ordinal_type;
      using size_type = typename impl_type::size_type;
      using local_ordinal_type_1d_view = typename impl_type::local_ordinal_type_1d_view;
      using size_type_1d_view = typename impl_type::size_type_1d_view;
      using vector_type_3d_view = typename impl_type::vector_type_3d_view;
      using block_crs_matrix_type = typename impl_type::tpetra_block_crs_matrix_type;

      constexpr int vector_length = impl_type::vector_length;
      const auto comm = A->getRowMap()->getComm();
      const auto& g = A->getCrsGraph();
      const auto blocksize = A->getBlockSize();

      // mirror the part interface to host
      const auto partptr = Kokkos::create_mirror_view_and_copy (Kokkos::HostSpace(), interf.partptr);
      const auto lclrow = Kokkos::create_mirror_view_and_copy (Kokkos::HostSpace(), interf.lclrow);
      const auto rowidx2part = Kokkos::create_mirror_view_and_copy (Kokkos::HostSpace(), interf.rowidx2part);
      const auto part2rowidx0 = Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace(), interf.part2rowidx0);
      const auto packptr = Kokkos::create_mirror_view_and_copy (Kokkos::HostSpace(), interf.packptr);

      const local_ordinal_type nrows = partptr(partptr.extent(0) - 1);

      // construct col2row: map a local column to the local row that owns it
      Kokkos::View<local_ordinal_type*,host_execution_space> col2row("col2row", A->getNodeNumCols());
      Kokkos::deep_copy(col2row, Teuchos::OrdinalTraits<local_ordinal_type>::invalid());
      {
        const auto rowmap = g.getRowMap();
        const auto colmap = g.getColMap();
        const auto dommap = g.getDomainMap();
        TEUCHOS_ASSERT( !(rowmap.is_null() || colmap.is_null() || dommap.is_null()));
#if !defined(__CUDA_ARCH__)
        const Kokkos::RangePolicy<host_execution_space> policy(0,nrows);
        Kokkos::parallel_for
          ("performSymbolicPhase::RangePolicy::col2row",
           policy, KOKKOS_LAMBDA(const local_ordinal_type &lr) {
            const global_ordinal_type gid = rowmap->getGlobalElement(lr);
            TEUCHOS_ASSERT(gid != Teuchos::OrdinalTraits<global_ordinal_type>::invalid());
            if (dommap->isNodeGlobalElement(gid)) {
              const local_ordinal_type lc = colmap->getLocalElement(gid);
# if defined(BLOCKTRIDICONTAINER_DEBUG)
              TEUCHOS_TEST_FOR_EXCEPT_MSG(lc == Teuchos::OrdinalTraits<local_ordinal_type>::invalid(),
                                          get_msg_prefix(comm) << "GID " << gid
                                          << " gives an invalid local column.");
# endif
              col2row(lr) = lc;
            }
          });
#endif
      }

      // construct the D and R graphs in A = D + R.
      {
        const auto local_graph = g.getLocalGraphHost();
        const auto local_graph_rowptr = local_graph.row_map;
        TEUCHOS_ASSERT(local_graph_rowptr.size() == static_cast<size_t>(nrows + 1));
        const auto local_graph_colidx = local_graph.entries;
        //assume no overlap.
        Kokkos::View<local_ordinal_type*,host_execution_space> lclrow2idx("lclrow2idx", nrows);
        {
          const Kokkos::RangePolicy<host_execution_space> policy(0,nrows);
          Kokkos::parallel_for
            ("performSymbolicPhase::RangePolicy::lclrow2idx",
             policy, KOKKOS_LAMBDA(const local_ordinal_type &i) {
              lclrow2idx[lclrow(i)] = i;
            });
        }

        // count (block) nnzs in D and R.
        typedef SumReducer<size_type,3,host_execution_space> sum_reducer_type;
        typename sum_reducer_type::value_type sum_reducer_value;
        {
          const Kokkos::RangePolicy<host_execution_space> policy(0,nrows);
          Kokkos::parallel_reduce
            (policy, KOKKOS_LAMBDA(const local_ordinal_type &lr, typename sum_reducer_type::value_type &update) {
              // LID -> index.
              const local_ordinal_type ri0 = lclrow2idx[lr];
              const local_ordinal_type pi0 = rowidx2part(ri0);
              for (size_type j=local_graph_rowptr(lr);j<local_graph_rowptr(lr+1);++j) {
                const local_ordinal_type lc = local_graph_colidx(j);
                const local_ordinal_type lc2r = col2row[lc];
                bool incr_R = false;
                do { // breakable
                  if (lc2r == Teuchos::OrdinalTraits<local_ordinal_type>::invalid()) {
                    incr_R = true;
                    break;
                  }
                  const local_ordinal_type ri = lclrow2idx[lc2r];
                  const local_ordinal_type pi = rowidx2part(ri);
                  if (pi != pi0) {
                    incr_R = true;
                    break;
                  }
                  // Test for being in the tridiag. This is done in index space.
                  if (ri0 + 1 >= ri && ri0 <= ri + 1)
                    ++update.v[0]; // D_nnz
                  else
                    incr_R = true;
                } while (0);
                if (incr_R) {
                  if (lc < nrows) ++update.v[1]; // R_nnz_owned
                  else ++update.v[2]; // R_nnz_remote
                }
              }
            }, sum_reducer_type(sum_reducer_value));
        }
        size_type D_nnz = sum_reducer_value.v[0];
        size_type R_nnz_owned = sum_reducer_value.v[1];
        size_type R_nnz_remote = sum_reducer_value.v[2];

        if (!overlap_communication_and_computation) {
          R_nnz_owned += R_nnz_remote;
          R_nnz_remote = 0;
        }
        // construct the D graph.
        const local_ordinal_type nparts = partptr.extent(0) - 1;
        {
          const auto flat_td_ptr = Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace(), btdm.flat_td_ptr);

          btdm.A_colindsub = local_ordinal_type_1d_view("btdm.A_colindsub", D_nnz);
          const auto D_A_colindsub = Kokkos::create_mirror_view(btdm.A_colindsub);

#if defined(BLOCKTRIDICONTAINER_DEBUG)
          Kokkos::deep_copy(D_A_colindsub, Teuchos::OrdinalTraits<local_ordinal_type>::invalid());
#endif

          {
            const Kokkos::RangePolicy<host_execution_space> policy(0, nparts);
            Kokkos::parallel_for
              ("performSymbolicPhase::RangePolicy<host_execution_space>::D_graph",
               policy, KOKKOS_LAMBDA(const local_ordinal_type &pi0) {
                const local_ordinal_type part_ri0 = part2rowidx0(pi0);
                local_ordinal_type offset = 0;
                for (local_ordinal_type ri0=partptr(pi0);ri0<partptr(pi0+1);++ri0) {
                  const local_ordinal_type td_row_os = btdm.RowToIndex(ri0 - part_ri0) + offset;
                  offset = 1;
                  const local_ordinal_type lr0 = lclrow(ri0);
                  const size_type j0 = local_graph_rowptr(lr0);
                  for (size_type j=j0;j<local_graph_rowptr(lr0+1);++j) {
                    const local_ordinal_type lc = local_graph_colidx(j);
                    const local_ordinal_type lc2r = col2row[lc];
                    if (lc2r == Teuchos::OrdinalTraits<local_ordinal_type>::invalid()) continue;
                    const local_ordinal_type ri = lclrow2idx[lc2r];
                    const local_ordinal_type pi = rowidx2part(ri);
                    if (pi != pi0) continue;
                    if (ri + 1 < ri0 || ri > ri0 + 1) continue;
                    const local_ordinal_type row_entry = j - j0;
                    D_A_colindsub(flat_td_ptr(pi0) + ((td_row_os + ri) - ri0)) = row_entry;
                  }
                }
              });
          }
#if defined(BLOCKTRIDICONTAINER_DEBUG)
          for (size_t i=0;i<D_A_colindsub.extent(0);++i)
            TEUCHOS_ASSERT(D_A_colindsub(i) != Teuchos::OrdinalTraits<local_ordinal_type>::invalid());
#endif
          Kokkos::deep_copy(btdm.A_colindsub, D_A_colindsub);

          // allocate the packed D values and initialize
          {
            const auto pack_td_ptr_last = Kokkos::subview(btdm.pack_td_ptr, nparts);
            const auto num_packed_blocks = Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace(), pack_td_ptr_last);
            btdm.values = vector_type_3d_view("btdm.values", num_packed_blocks(), blocksize, blocksize);
            if (vector_length > 1) setTridiagsToIdentity(btdm, interf.packptr);
          }
        }
        // construct the R graph.
        {
          amd.rowptr = size_type_1d_view("amd.rowptr", nrows + 1);
          amd.A_colindsub = local_ordinal_type_1d_view(do_not_initialize_tag("amd.A_colindsub"), R_nnz_owned);

          const auto R_rowptr = Kokkos::create_mirror_view(amd.rowptr);
          const auto R_A_colindsub = Kokkos::create_mirror_view(amd.A_colindsub);

          amd.rowptr_remote = size_type_1d_view("amd.rowptr_remote", overlap_communication_and_computation ? nrows + 1 : 0);
          amd.A_colindsub_remote = local_ordinal_type_1d_view(do_not_initialize_tag("amd.A_colindsub_remote"), R_nnz_remote);

          const auto R_rowptr_remote = Kokkos::create_mirror_view(amd.rowptr_remote);
          const auto R_A_colindsub_remote = Kokkos::create_mirror_view(amd.A_colindsub_remote);

          {
            const Kokkos::RangePolicy<host_execution_space> policy(0,nrows);
            Kokkos::parallel_for
              ("performSymbolicPhase::RangePolicy<host_execution_space>::R_graph_count",
               policy, KOKKOS_LAMBDA(const local_ordinal_type &lr) {
                const local_ordinal_type ri0 = lclrow2idx[lr];
                const local_ordinal_type pi0 = rowidx2part(ri0);
                const size_type j0 = local_graph_rowptr(lr);
                for (size_type j=j0;j<local_graph_rowptr(lr+1);++j) {
                  const local_ordinal_type lc = local_graph_colidx(j);
                  const local_ordinal_type lc2r = col2row[lc];
                  if (lc2r != Teuchos::OrdinalTraits<local_ordinal_type>::invalid()) {
                    const local_ordinal_type ri = lclrow2idx[lc2r];
                    const local_ordinal_type pi = rowidx2part(ri);
                    if (pi == pi0 && ri + 1 >= ri0 && ri <= ri0 + 1)
                      continue; // a D entry; not part of R
                  }
                  // counts; an exclusive scan below turns them into offsets
                  if (!overlap_communication_and_computation || lc < nrows)
                    ++R_rowptr(lr);
                  else
                    ++R_rowptr_remote(lr);
                }
              });
          }
          // exclusive scan on both counters at once, filling the column lists
          typedef ArrayValueType<size_type,2> update_type;
          {
            Kokkos::RangePolicy<host_execution_space> policy(0,nrows+1);
            Kokkos::parallel_scan
              ("performSymbolicPhase::RangePolicy<host_execution_space>::R_graph_fill",
               policy, KOKKOS_LAMBDA(const local_ordinal_type &lr,
                                     update_type &update,
                                     const bool &final) {
                update_type val;
                val.v[0] = R_rowptr(lr);
                if (overlap_communication_and_computation)
                  val.v[1] = R_rowptr_remote(lr);

                if (final) {
                  R_rowptr(lr) = update.v[0];
                  if (overlap_communication_and_computation)
                    R_rowptr_remote(lr) = update.v[1];

                  if (lr < nrows) {
                    const local_ordinal_type ri0 = lclrow2idx[lr];
                    const local_ordinal_type pi0 = rowidx2part(ri0);

                    size_type cnt_rowptr = R_rowptr(lr);
                    size_type cnt_rowptr_remote = overlap_communication_and_computation ? R_rowptr_remote(lr) : 0;

                    const size_type j0 = local_graph_rowptr(lr);
                    for (size_type j=j0;j<local_graph_rowptr(lr+1);++j) {
                      const local_ordinal_type lc = local_graph_colidx(j);
                      const local_ordinal_type lc2r = col2row[lc];
                      if (lc2r != Teuchos::OrdinalTraits<local_ordinal_type>::invalid()) {
                        const local_ordinal_type ri = lclrow2idx[lc2r];
                        const local_ordinal_type pi = rowidx2part(ri);
                        if (pi == pi0 && ri + 1 >= ri0 && ri <= ri0 + 1)
                          continue;
                      }
                      const local_ordinal_type row_entry = j - j0;
                      if (!overlap_communication_and_computation || lc < nrows)
                        R_A_colindsub(cnt_rowptr++) = row_entry;
                      else
                        R_A_colindsub_remote(cnt_rowptr_remote++) = row_entry;
                    }
                  }
                }
                update += val;
              });
          }
          TEUCHOS_ASSERT(R_rowptr(nrows) == R_nnz_owned);
          Kokkos::deep_copy(amd.rowptr, R_rowptr);
          Kokkos::deep_copy(amd.A_colindsub, R_A_colindsub);
          if (overlap_communication_and_computation) {
            TEUCHOS_ASSERT(R_rowptr_remote(nrows) == R_nnz_remote);
            Kokkos::deep_copy(amd.rowptr_remote, R_rowptr_remote);
            Kokkos::deep_copy(amd.A_colindsub_remote, R_A_colindsub_remote);
          }

          // init the tpetra value array pointer
          amd.tpetra_values = (const_cast<block_crs_matrix_type*>(A.get())->getValuesDeviceNonConst());
        }
      }
    }
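    // Summary of what the symbolic phase produced:
    //   - btdm.A_colindsub(k): within-row entry of A feeding tridiag block k;
    //   - btdm.values: packed tridiag blocks, zero-initialized and (for
    //     vector_length > 1) identity-padded in the unused SIMD lanes;
    //   - amd.rowptr/A_colindsub (plus the *_remote pair when overlapping):
    //     the entries of R = A - D;
    //   - amd.tpetra_values: an unmanaged alias of A's block values.
    // The numeric phase below reads only these index arrays and A's values.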
    ///
    /// default mode and algorithm for the extract-and-factorize kernels
    ///
    template<typename ArgActiveExecutionMemorySpace>
    struct ExtractAndFactorizeTridiagsDefaultModeAndAlgo {
      typedef KB::Mode::Serial mode_type;
#if defined(__KOKKOSBATCHED_INTEL_MKL_COMPACT_BATCHED__)
      typedef KB::Algo::Level3::CompactMKL algo_type;
#else
      typedef KB::Algo::Level3::Blocked algo_type;
#endif
      static int recommended_team_size(const int /* blksize */,
                                       const int /* vector_length */,
                                       const int /* internal_vector_length */) {
        return 1;
      }
    };

#if defined(KOKKOS_ENABLE_CUDA)
    static inline int ExtractAndFactorizeRecommendedCudaTeamSize(const int blksize,
                                                                 const int vector_length,
                                                                 const int internal_vector_length) {
      const int vector_size = vector_length/internal_vector_length;
      int total_team_size(0);
      if      (blksize <=  5) total_team_size =  32;
      else if (blksize <=  9) total_team_size =  32;
      else if (blksize <= 12) total_team_size =  96;
      else if (blksize <= 16) total_team_size = 128;
      else if (blksize <= 20) total_team_size = 160;
      else                    total_team_size = 160;
      return 2*total_team_size/vector_size;
    }
    template<>
    struct ExtractAndFactorizeTridiagsDefaultModeAndAlgo<Kokkos::CudaSpace> {
      typedef KB::Mode::Team mode_type;
      typedef KB::Algo::Level3::Unblocked algo_type;
      static int recommended_team_size(const int blksize,
                                       const int vector_length,
                                       const int internal_vector_length) {
        return ExtractAndFactorizeRecommendedCudaTeamSize(blksize, vector_length, internal_vector_length);
      }
    };
    template<>
    struct ExtractAndFactorizeTridiagsDefaultModeAndAlgo<Kokkos::CudaUVMSpace> {
      typedef KB::Mode::Team mode_type;
      typedef KB::Algo::Level3::Unblocked algo_type;
      static int recommended_team_size(const int blksize,
                                       const int vector_length,
                                       const int internal_vector_length) {
        return ExtractAndFactorizeRecommendedCudaTeamSize(blksize, vector_length, internal_vector_length);
      }
    };
#endif
#if defined(KOKKOS_ENABLE_HIP)
    static inline int ExtractAndFactorizeRecommendedHIPTeamSize(const int blksize,
                                                                const int vector_length,
                                                                const int internal_vector_length) {
      const int vector_size = vector_length/internal_vector_length;
      int total_team_size(0);
      if      (blksize <=  5) total_team_size =  32;
      else if (blksize <=  9) total_team_size =  32;
      else if (blksize <= 12) total_team_size =  96;
      else if (blksize <= 16) total_team_size = 128;
      else if (blksize <= 20) total_team_size = 160;
      else                    total_team_size = 160;
      return 2*total_team_size/vector_size;
    }
    template<>
    struct ExtractAndFactorizeTridiagsDefaultModeAndAlgo<Kokkos::Experimental::HIPSpace> {
      typedef KB::Mode::Team mode_type;
      typedef KB::Algo::Level3::Unblocked algo_type;
      static int recommended_team_size(const int blksize,
                                       const int vector_length,
                                       const int internal_vector_length) {
        return ExtractAndFactorizeRecommendedHIPTeamSize(blksize, vector_length, internal_vector_length);
      }
    };
    template<>
    struct ExtractAndFactorizeTridiagsDefaultModeAndAlgo<Kokkos::Experimental::HIPHostPinnedSpace> {
      typedef KB::Mode::Team mode_type;
      typedef KB::Algo::Level3::Unblocked algo_type;
      static int recommended_team_size(const int blksize,
                                       const int vector_length,
                                       const int internal_vector_length) {
        return ExtractAndFactorizeRecommendedHIPTeamSize(blksize, vector_length, internal_vector_length);
      }
    };
#endif
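    // Example: for blksize = 9, vector_length = 8, internal_vector_length = 4,
    // the helpers above give vector_size = 8/4 = 2 and total_team_size = 32,
    // hence recommended_team_size(...) = 2*32/2 = 32 threads per team.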
    ///
    /// extract the block tridiagonal matrix and factorize it
    ///
    template<typename MatrixType>
    struct ExtractAndFactorizeTridiags {
    public:
      using impl_type = ImplType<MatrixType>;
      // execution and memory spaces
      using execution_space = typename impl_type::execution_space;
      using memory_space = typename impl_type::memory_space;
      // scalar and ordinal types
      using local_ordinal_type = typename impl_type::local_ordinal_type;
      using size_type = typename impl_type::size_type;
      using impl_scalar_type = typename impl_type::impl_scalar_type;
      using magnitude_type = typename impl_type::magnitude_type;
      // tpetra interface
      using block_crs_matrix_type = typename impl_type::tpetra_block_crs_matrix_type;
      // views
      using local_ordinal_type_1d_view = typename impl_type::local_ordinal_type_1d_view;
      using size_type_1d_view = typename impl_type::size_type_1d_view;
      using impl_scalar_type_1d_view_tpetra = typename impl_type::impl_scalar_type_1d_view_tpetra;
      // vectorization
      using btdm_scalar_type = typename impl_type::btdm_scalar_type;
      using btdm_magnitude_type = typename impl_type::btdm_magnitude_type;
      using vector_type_3d_view = typename impl_type::vector_type_3d_view;
      using internal_vector_type_4d_view = typename impl_type::internal_vector_type_4d_view;
      using btdm_scalar_type_4d_view = typename impl_type::btdm_scalar_type_4d_view;
      using internal_vector_scratch_type_3d_view = Scratch<typename impl_type::internal_vector_type_3d_view>;
      using btdm_scalar_scratch_type_3d_view = Scratch<typename impl_type::btdm_scalar_type_3d_view>;

      using internal_vector_type = typename impl_type::internal_vector_type;
      static constexpr int vector_length = impl_type::vector_length;
      static constexpr int internal_vector_length = impl_type::internal_vector_length;

      // team policy member type
      using team_policy_type = Kokkos::TeamPolicy<execution_space>;
      using member_type = typename team_policy_type::member_type;
    private:
      // part interface
      const ConstUnmanaged<local_ordinal_type_1d_view> partptr, lclrow, packptr;
      const local_ordinal_type max_partsz;
      // block crs matrix
      using size_type_1d_view_tpetra = Kokkos::View<size_t*,typename impl_type::node_device_type>;
      const ConstUnmanaged<size_type_1d_view_tpetra> A_rowptr;
      const ConstUnmanaged<impl_scalar_type_1d_view_tpetra> A_values;
      // block tridiags
      const ConstUnmanaged<size_type_1d_view> pack_td_ptr, flat_td_ptr;
      const ConstUnmanaged<local_ordinal_type_1d_view> A_colindsub;
      const Unmanaged<internal_vector_type_4d_view> internal_vector_values;
      const Unmanaged<btdm_scalar_type_4d_view> scalar_values;
      // shared information
      const local_ordinal_type blocksize, blocksize_square;
      // diagonal safeguard
      const magnitude_type tiny;
      const local_ordinal_type vector_loop_size;
      const local_ordinal_type vector_length_value;
    public:
      ExtractAndFactorizeTridiags(const BlockTridiags<MatrixType> &btdm_,
                                  const PartInterface<MatrixType> &interf_,
                                  const Teuchos::RCP<const block_crs_matrix_type> &A_,
                                  const magnitude_type& tiny_) :
        // interface
        partptr(interf_.partptr),
        lclrow(interf_.lclrow),
        packptr(interf_.packptr),
        max_partsz(interf_.max_partsz),
        // block crs matrix
        A_rowptr(A_->getCrsGraph().getLocalGraphDevice().row_map),
        A_values(const_cast<block_crs_matrix_type*>(A_.get())->getValuesDeviceNonConst()),
        // block tridiags
        pack_td_ptr(btdm_.pack_td_ptr),
        flat_td_ptr(btdm_.flat_td_ptr),
        A_colindsub(btdm_.A_colindsub),
        internal_vector_values((internal_vector_type*)btdm_.values.data(),
                               btdm_.values.extent(0),
                               btdm_.values.extent(1),
                               btdm_.values.extent(2),
                               vector_length/internal_vector_length),
        scalar_values((btdm_scalar_type*)btdm_.values.data(),
                      btdm_.values.extent(0),
                      btdm_.values.extent(1),
                      btdm_.values.extent(2),
                      vector_length),
        blocksize(btdm_.values.extent(1)),
        blocksize_square(blocksize*blocksize),
        // diagonal weight to avoid zero pivots
        tiny(tiny_),
        vector_loop_size(vector_length/internal_vector_length),
        vector_length_value(vector_length) {}
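      // Note: internal_vector_values and scalar_values are two unmanaged views
      // aliasing the same btdm_.values allocation; only the last extent
      // differs. Addressed as internal_vector_type there are
      // vector_length/internal_vector_length SIMD groups per entry, while
      // addressed as btdm_scalar_type there are vector_length individual
      // lanes. The kernels below pick whichever granularity matches their
      // parallel loop structure.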
      KOKKOS_INLINE_FUNCTION
      void
      extract(local_ordinal_type partidx,
              local_ordinal_type npacks) const {
        const size_type kps = pack_td_ptr(partidx);
        local_ordinal_type kfs[vector_length] = {};
        local_ordinal_type ri0[vector_length] = {};
        local_ordinal_type nrows[vector_length] = {};

        for (local_ordinal_type vi=0;vi<npacks;++vi,++partidx) {
          kfs[vi] = flat_td_ptr(partidx);
          ri0[vi] = partptr(partidx);
          nrows[vi] = partptr(partidx+1) - ri0[vi];
        }
        for (local_ordinal_type tr=0,j=0;tr<nrows[0];++tr) {
          for (local_ordinal_type e=0;e<3;++e) {
            const impl_scalar_type* block[vector_length] = {};
            for (local_ordinal_type vi=0;vi<npacks;++vi) {
              const size_type Aj = A_rowptr(lclrow(ri0[vi] + tr)) + A_colindsub(kfs[vi] + j);
              block[vi] = &A_values(Aj*blocksize_square);
            }
            const size_type pi = kps + j;
            ++j;
            for (local_ordinal_type ii=0;ii<blocksize;++ii) {
              for (local_ordinal_type jj=0;jj<blocksize;++jj) {
                const auto idx = ii*blocksize + jj;
                auto& v = internal_vector_values(pi, ii, jj, 0);
                for (local_ordinal_type vi=0;vi<npacks;++vi)
                  v[vi] = static_cast<btdm_scalar_type>(block[vi][idx]);
              }
            }

            if (nrows[0] == 1) break;
            if (e == 1 && (tr == 0 || tr+1 == nrows[0])) break;
            for (local_ordinal_type vi=1;vi<npacks;++vi) {
              if ((e == 0 && nrows[vi] == 1) || (e == 1 && tr+1 == nrows[vi])) {
                npacks = vi;
                break;
              }
            }
          }
        }
      }
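      // How the loop above walks the storage: parts within a pack are sorted
      // from longest to shortest, so lanes vi >= 1 can only run out of rows
      // before lane 0 does; when that happens npacks is trimmed and the
      // surviving lanes keep packing. For each block row tr, e = 0,1,2 visits
      // the up-to-three blocks (diagonal plus off-diagonals) of that row in
      // the flattened tridiagonal ordering; the early breaks skip the
      // off-diagonal blocks that do not exist on the first and last rows of a
      // part.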
      KOKKOS_INLINE_FUNCTION
      void
      extract(const member_type &member,
              const local_ordinal_type &partidxbeg,
              const local_ordinal_type &npacks,
              const local_ordinal_type &vbeg) const {
        local_ordinal_type kfs_vals[internal_vector_length] = {};
        local_ordinal_type ri0_vals[internal_vector_length] = {};
        local_ordinal_type nrows_vals[internal_vector_length] = {};

        const size_type kps = pack_td_ptr(partidxbeg);
        for (local_ordinal_type v=vbeg,vi=0;v<npacks && vi<internal_vector_length;++v,++vi) {
          kfs_vals[vi] = flat_td_ptr(partidxbeg+vi);
          ri0_vals[vi] = partptr(partidxbeg+vi);
          nrows_vals[vi] = partptr(partidxbeg+vi+1) - ri0_vals[vi];
        }

        local_ordinal_type j_vals[internal_vector_length] = {};
        for (local_ordinal_type tr=0;tr<nrows_vals[0];++tr) {
          for (local_ordinal_type v=vbeg,vi=0;v<npacks && vi<internal_vector_length;++v,++vi) {
            const local_ordinal_type nrows = nrows_vals[vi];
            if (tr < nrows) {
              auto &j = j_vals[vi];
              const local_ordinal_type kfs = kfs_vals[vi];
              const local_ordinal_type ri0 = ri0_vals[vi];
              const local_ordinal_type lbeg = (tr == 0 ? 1 : 0);
              const local_ordinal_type lend = (tr == nrows - 1 ? 2 : 3);
              for (local_ordinal_type l=lbeg;l<lend;++l,++j) {
                const size_type Aj = A_rowptr(lclrow(ri0 + tr)) + A_colindsub(kfs + j);
                const impl_scalar_type* block = &A_values(Aj*blocksize_square);
                const size_type pi = kps + j;
                Kokkos::parallel_for
                  (Kokkos::TeamThreadRange(member,blocksize),
                   [&](const local_ordinal_type &ii) {
                    for (local_ordinal_type jj=0;jj<blocksize;++jj)
                      scalar_values(pi, ii, jj, v) = static_cast<btdm_scalar_type>(block[ii*blocksize + jj]);
                  });
              }
            }
          }
        }
      }
      template<typename AAViewType,
               typename WWViewType>
      KOKKOS_INLINE_FUNCTION
      void
      factorize(const member_type &member,
                const local_ordinal_type &i0,
                const local_ordinal_type &nrows,
                const local_ordinal_type &v,
                const AAViewType &AA,
                const WWViewType &WW) const {
        typedef ExtractAndFactorizeTridiagsDefaultModeAndAlgo
          <Kokkos::Impl::ActiveExecutionMemorySpace> default_mode_and_algo_type;
        typedef default_mode_and_algo_type::mode_type default_mode_type;
        typedef default_mode_and_algo_type::algo_type default_algo_type;

        // constant
        const auto one = Kokkos::ArithTraits<btdm_magnitude_type>::one();

        // subview pattern: repoint A, B, C as the factorization sweeps down the part
        auto A = Kokkos::subview(AA, i0, Kokkos::ALL(), Kokkos::ALL(), v);
        KB::LU<member_type,
               default_mode_type,KB::Algo::LU::Unblocked>
          ::invoke(member, A, tiny);
        if (nrows > 1) {
          auto B = A;
          auto C = A;
          local_ordinal_type i = i0;
          for (local_ordinal_type tr=1;tr<nrows;++tr,i+=3) {
            B.assign_data( &AA(i+1,0,0,v) );
            KB::Trsm<member_type,
                     KB::Side::Left,KB::Uplo::Lower,KB::Trans::NoTranspose,KB::Diag::Unit,
                     default_mode_type,default_algo_type>
              ::invoke(member, one, A, B);
            C.assign_data( &AA(i+2,0,0,v) );
            KB::Trsm<member_type,
                     KB::Side::Right,KB::Uplo::Upper,KB::Trans::NoTranspose,KB::Diag::NonUnit,
                     default_mode_type,default_algo_type>
              ::invoke(member, one, A, C);
            A.assign_data( &AA(i+3,0,0,v) );

            member.team_barrier();
            KB::Gemm<member_type,
                     KB::Trans::NoTranspose,KB::Trans::NoTranspose,
                     default_mode_type,default_algo_type>
              ::invoke(member, -one, C, B, one, A);
            KB::LU<member_type,
                   default_mode_type,KB::Algo::LU::Unblocked>
              ::invoke(member, A, tiny);
          }
        } else {
          // single-row part (block Jacobi): invert the diagonal block in place
          auto W = Kokkos::subview(WW, Kokkos::ALL(), Kokkos::ALL(), v);
          KB::Copy<member_type,KB::Trans::NoTranspose,default_mode_type>
            ::invoke(member, A, W);
          KB::SetIdentity<member_type,default_mode_type>
            ::invoke(member, A);
          member.team_barrier();
          KB::Trsm<member_type,
                   KB::Side::Left,KB::Uplo::Lower,KB::Trans::NoTranspose,KB::Diag::Unit,
                   default_mode_type,default_algo_type>
            ::invoke(member, one, W, A);
          KB::Trsm<member_type,
                   KB::Side::Left,KB::Uplo::Upper,KB::Trans::NoTranspose,KB::Diag::NonUnit,
                   default_mode_type,default_algo_type>
            ::invoke(member, one, W, A);
        }
      }
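      // The loop above is the standard block tridiagonal LU recurrence. With
      // the diagonal block D_k at position i and coupling blocks at i+1 and
      // i+2, each step computes, in place,
      //   B_k := L_k^{-1} B_k,   C_k := C_k U_k^{-1},
      //   D_{k+1} := LU(D_{k+1} - C_k B_k),
      // so the later solve phase needs only triangular solves and one GEMM
      // per block row. The `tiny` argument passed to KB::LU is used to nudge
      // near-zero pivots away from zero (see the AddRadial headers included
      // at the top of this file).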
    public:
      /// team parallel workflow
      struct ExtractAndFactorizeTag {};

      KOKKOS_INLINE_FUNCTION
      void
      operator() (const ExtractAndFactorizeTag &, const member_type &member) const {
        // btdm is packed and sorted from the largest part
        const local_ordinal_type packidx = member.league_rank();

        const local_ordinal_type partidx = packptr(packidx);
        const local_ordinal_type npacks = packptr(packidx+1) - partidx;
        const local_ordinal_type i0 = pack_td_ptr(partidx);
        const local_ordinal_type nrows = partptr(partidx+1) - partptr(partidx);

        internal_vector_scratch_type_3d_view
          WW(member.team_scratch(0), blocksize, blocksize, vector_loop_size);
        if (vector_loop_size == 1) {
          extract(partidx, npacks);
          factorize(member, i0, nrows, 0, internal_vector_values, WW);
        } else {
          Kokkos::parallel_for
            (Kokkos::ThreadVectorRange(member, vector_loop_size),
             [&](const local_ordinal_type &v) {
              const local_ordinal_type vbeg = v*internal_vector_length;
              if (vbeg < npacks)
                extract(member, partidx+vbeg, npacks, vbeg);
              // all vector lanes join the barrier and the factorization
              member.team_barrier();
              factorize(member, i0, nrows, v, internal_vector_values, WW);
            });
        }
      }
      void run() {
        IFPACK2_BLOCKTRIDICONTAINER_PROFILER_REGION_BEGIN;
        const local_ordinal_type team_size =
          ExtractAndFactorizeTridiagsDefaultModeAndAlgo<typename execution_space::memory_space>::
          recommended_team_size(blocksize, vector_length, internal_vector_length);
        const local_ordinal_type per_team_scratch = internal_vector_scratch_type_3d_view::
          shmem_size(blocksize, blocksize, vector_loop_size);

        Kokkos::TeamPolicy<execution_space,ExtractAndFactorizeTag>
          policy(packptr.extent(0)-1, team_size, vector_loop_size);
#if defined(KOKKOS_ENABLE_DEPRECATED_CODE)
        Kokkos::parallel_for("ExtractAndFactorize::TeamPolicy::run<ExtractAndFactorizeTag>",
                             policy.set_scratch_size(0,Kokkos::PerTeam(per_team_scratch)), *this);
#else
        policy.set_scratch_size(0,Kokkos::PerTeam(per_team_scratch));
        Kokkos::parallel_for("ExtractAndFactorize::TeamPolicy::run<ExtractAndFactorizeTag>",
                             policy, *this);
#endif
        IFPACK2_BLOCKTRIDICONTAINER_PROFILER_REGION_END;
      }
    };
    ///
    /// top level numeric interface
    ///
    template<typename MatrixType>
    void
    performNumericPhase(const Teuchos::RCP<const typename ImplType<MatrixType>::tpetra_block_crs_matrix_type> &A,
                        const PartInterface<MatrixType> &interf,
                        BlockTridiags<MatrixType> &btdm,
                        const typename ImplType<MatrixType>::magnitude_type tiny) {
      IFPACK2_BLOCKTRIDICONTAINER_TIMER("BlockTriDi::NumericPhase");
      ExtractAndFactorizeTridiags<MatrixType> function(btdm, interf, A, tiny);
      function.run();
    }
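    // A minimal usage sketch (A, interf, and btdm are whatever the caller set
    // up during the symbolic phase; tiny is the diagonal safeguard):
    //
    //   performNumericPhase<matrix_type>(A, interf, btdm, tiny);
    //
    // This extracts the block tridiagonal part of A into btdm and factorizes
    // it in place, ready for SolveTridiags below.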
    ///
    /// pack multivector
    ///
    template<typename MatrixType>
    struct MultiVectorConverter {
    public:
      using impl_type = ImplType<MatrixType>;
      using execution_space = typename impl_type::execution_space;
      using memory_space = typename impl_type::memory_space;

      using local_ordinal_type = typename impl_type::local_ordinal_type;
      using impl_scalar_type = typename impl_type::impl_scalar_type;
      using btdm_scalar_type = typename impl_type::btdm_scalar_type;
      using tpetra_multivector_type = typename impl_type::tpetra_multivector_type;
      using local_ordinal_type_1d_view = typename impl_type::local_ordinal_type_1d_view;
      using vector_type_3d_view = typename impl_type::vector_type_3d_view;
      using impl_scalar_type_2d_view_tpetra = typename impl_type::impl_scalar_type_2d_view_tpetra;
      using const_impl_scalar_type_2d_view_tpetra = typename impl_scalar_type_2d_view_tpetra::const_type;
      static constexpr int vector_length = impl_type::vector_length;

      using member_type = typename Kokkos::TeamPolicy<execution_space>::member_type;
    private:
      // part interface
      const ConstUnmanaged<local_ordinal_type_1d_view> partptr;
      const ConstUnmanaged<local_ordinal_type_1d_view> packptr;
      const ConstUnmanaged<local_ordinal_type_1d_view> part2packrowidx0;
      const ConstUnmanaged<local_ordinal_type_1d_view> part2rowidx0;
      const ConstUnmanaged<local_ordinal_type_1d_view> lclrow;
      const local_ordinal_type blocksize;
      const local_ordinal_type num_vectors;

      // packed and flat multivectors
      vector_type_3d_view packed_multivector;
      const_impl_scalar_type_2d_view_tpetra scalar_multivector;

      template<typename TagType>
      KOKKOS_INLINE_FUNCTION
      void copy_multivectors(const local_ordinal_type &j,
                             const local_ordinal_type &vi,
                             const local_ordinal_type &pri,
                             const local_ordinal_type &ri0) const {
        for (local_ordinal_type col=0;col<num_vectors;++col)
          for (local_ordinal_type i=0;i<blocksize;++i)
            packed_multivector(pri, i, col)[vi] = static_cast<btdm_scalar_type>(scalar_multivector(blocksize*lclrow(ri0+j)+i,col));
      }

    public:
      MultiVectorConverter(const PartInterface<MatrixType> &interf,
                           const vector_type_3d_view &pmv)
        : partptr(interf.partptr),
          packptr(interf.packptr),
          part2packrowidx0(interf.part2packrowidx0),
          part2rowidx0(interf.part2rowidx0),
          lclrow(interf.lclrow),
          blocksize(pmv.extent(1)),
          num_vectors(pmv.extent(2)),
          packed_multivector(pmv) {}
      KOKKOS_INLINE_FUNCTION
      void
      operator() (const local_ordinal_type &packidx) const {
        local_ordinal_type partidx = packptr(packidx);
        local_ordinal_type npacks = packptr(packidx+1) - partidx;
        const local_ordinal_type pri0 = part2packrowidx0(partidx);

        local_ordinal_type ri0[vector_length] = {};
        local_ordinal_type nrows[vector_length] = {};
        for (local_ordinal_type v=0;v<npacks;++v,++partidx) {
          ri0[v] = part2rowidx0(partidx);
          nrows[v] = part2rowidx0(partidx+1) - ri0[v];
        }
        for (local_ordinal_type j=0;j<nrows[0];++j) {
          // trim the pack as shorter parts run out of rows
          local_ordinal_type cnt = 1;
          for (;cnt<npacks && j!= nrows[cnt];++cnt);
          npacks = cnt;
          const local_ordinal_type pri = pri0 + j;
          for (local_ordinal_type col=0;col<num_vectors;++col)
            for (local_ordinal_type i=0;i<blocksize;++i)
              for (local_ordinal_type v=0;v<npacks;++v)
                packed_multivector(pri, i, col)[v] = static_cast<btdm_scalar_type>(scalar_multivector(blocksize*lclrow(ri0[v]+j)+i,col));
        }
      }
      KOKKOS_INLINE_FUNCTION
      void
      operator() (const member_type &member) const {
        const local_ordinal_type packidx = member.league_rank();
        const local_ordinal_type partidx_begin = packptr(packidx);
        const local_ordinal_type npacks = packptr(packidx+1) - partidx_begin;
        const local_ordinal_type pri0 = part2packrowidx0(partidx_begin);
        Kokkos::parallel_for(Kokkos::ThreadVectorRange(member, npacks), [&](const local_ordinal_type &v) {
          const local_ordinal_type partidx = partidx_begin + v;
          const local_ordinal_type ri0 = part2rowidx0(partidx);
          const local_ordinal_type nrows = part2rowidx0(partidx+1) - ri0;

          if (nrows == 1) {
            const local_ordinal_type pri = pri0;
            for (local_ordinal_type col=0;col<num_vectors;++col) {
              Kokkos::parallel_for(Kokkos::TeamThreadRange(member, blocksize), [&](const local_ordinal_type &i) {
                packed_multivector(pri, i, col)[v] = static_cast<btdm_scalar_type>(scalar_multivector(blocksize*lclrow(ri0)+i,col));
              });
            }
          } else {
            Kokkos::parallel_for(Kokkos::TeamThreadRange(member, nrows), [&](const local_ordinal_type &j) {
              const local_ordinal_type pri = pri0 + j;
              for (local_ordinal_type col=0;col<num_vectors;++col)
                for (local_ordinal_type i=0;i<blocksize;++i)
                  packed_multivector(pri, i, col)[v] = static_cast<btdm_scalar_type>(scalar_multivector(blocksize*lclrow(ri0+j)+i,col));
            });
          }
        });
      }
      void run(const const_impl_scalar_type_2d_view_tpetra &scalar_multivector_) {
        IFPACK2_BLOCKTRIDICONTAINER_PROFILER_REGION_BEGIN;
        IFPACK2_BLOCKTRIDICONTAINER_TIMER("BlockTriDi::MultiVectorConverter");

        scalar_multivector = scalar_multivector_;
        if (is_cuda<execution_space>::value) {
#if defined(KOKKOS_ENABLE_CUDA)
          const local_ordinal_type vl = vector_length;
          const Kokkos::TeamPolicy<execution_space> policy(packptr.extent(0) - 1, Kokkos::AUTO(), vl);
          Kokkos::parallel_for("MultiVectorConverter::TeamPolicy", policy, *this);
#endif
        } else if (is_hip<execution_space>::value) {
#if defined(KOKKOS_ENABLE_HIP)
          const local_ordinal_type vl = vector_length;
          const Kokkos::TeamPolicy<execution_space> policy(packptr.extent(0) - 1, Kokkos::AUTO(), vl);
          Kokkos::parallel_for("MultiVectorConverter::TeamPolicy", policy, *this);
#endif
        } else {
#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
          TEUCHOS_TEST_FOR_EXCEPT_MSG(true, "Error: device compiler should not see this code");
#else
          const Kokkos::RangePolicy<execution_space> policy(0, packptr.extent(0) - 1);
          Kokkos::parallel_for("MultiVectorConverter::RangePolicy", policy, *this);
#endif
        }
        IFPACK2_BLOCKTRIDICONTAINER_PROFILER_REGION_END;
      }
    };
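    // Layout reminder: MultiVectorConverter writes
    // packed_multivector(pri, i, col)[v], where pri is the packed block row,
    // i the entry within the block, col the right-hand-side column, and v the
    // SIMD lane selecting the part within a pack. Rows are gathered through
    // lclrow so the packed ordering matches the tridiagonal factorization
    // above.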
    ///
    /// solve tridiags
    ///
    template<typename ArgActiveExecutionMemorySpace>
    struct SolveTridiagsDefaultModeAndAlgo;

    template<>
    struct SolveTridiagsDefaultModeAndAlgo<Kokkos::HostSpace> {
      typedef KB::Mode::Serial mode_type;
      typedef KB::Algo::Level2::Unblocked single_vector_algo_type;
#if defined(__KOKKOSBATCHED_INTEL_MKL_COMPACT_BATCHED__)
      typedef KB::Algo::Level3::CompactMKL multi_vector_algo_type;
#else
      typedef KB::Algo::Level3::Blocked multi_vector_algo_type;
#endif
      static int recommended_team_size(const int /* blksize */,
                                       const int /* vector_length */,
                                       const int /* internal_vector_length */) {
        return 1;
      }
    };
#if defined(KOKKOS_ENABLE_CUDA)
    static inline int SolveTridiagsRecommendedCudaTeamSize(const int blksize,
                                                           const int vector_length,
                                                           const int internal_vector_length) {
      const int vector_size = vector_length/internal_vector_length;
      int total_team_size(0);
      if      (blksize <=  5) total_team_size =  32;
      else if (blksize <=  9) total_team_size =  32;
      else if (blksize <= 12) total_team_size =  96;
      else if (blksize <= 16) total_team_size = 128;
      else if (blksize <= 20) total_team_size = 160;
      else                    total_team_size = 160;
      return total_team_size/vector_size;
    }

    template<>
    struct SolveTridiagsDefaultModeAndAlgo<Kokkos::CudaSpace> {
      typedef KB::Mode::Team mode_type;
      typedef KB::Algo::Level2::Unblocked single_vector_algo_type;
      typedef KB::Algo::Level3::Unblocked multi_vector_algo_type;
      static int recommended_team_size(const int blksize,
                                       const int vector_length,
                                       const int internal_vector_length) {
        return SolveTridiagsRecommendedCudaTeamSize(blksize, vector_length, internal_vector_length);
      }
    };

    template<>
    struct SolveTridiagsDefaultModeAndAlgo<Kokkos::CudaUVMSpace> {
      typedef KB::Mode::Team mode_type;
      typedef KB::Algo::Level2::Unblocked single_vector_algo_type;
      typedef KB::Algo::Level3::Unblocked multi_vector_algo_type;
      static int recommended_team_size(const int blksize,
                                       const int vector_length,
                                       const int internal_vector_length) {
        return SolveTridiagsRecommendedCudaTeamSize(blksize, vector_length, internal_vector_length);
      }
    };
#endif
#if defined(KOKKOS_ENABLE_HIP)
    static inline int SolveTridiagsRecommendedHIPTeamSize(const int blksize,
                                                          const int vector_length,
                                                          const int internal_vector_length) {
      const int vector_size = vector_length/internal_vector_length;
      int total_team_size(0);
      if      (blksize <=  5) total_team_size =  32;
      else if (blksize <=  9) total_team_size =  32;
      else if (blksize <= 12) total_team_size =  96;
      else if (blksize <= 16) total_team_size = 128;
      else if (blksize <= 20) total_team_size = 160;
      else                    total_team_size = 160;
      return total_team_size/vector_size;
    }

    template<>
    struct SolveTridiagsDefaultModeAndAlgo<Kokkos::Experimental::HIPSpace> {
      typedef KB::Mode::Team mode_type;
      typedef KB::Algo::Level2::Unblocked single_vector_algo_type;
      typedef KB::Algo::Level3::Unblocked multi_vector_algo_type;
      static int recommended_team_size(const int blksize,
                                       const int vector_length,
                                       const int internal_vector_length) {
        return SolveTridiagsRecommendedHIPTeamSize(blksize, vector_length, internal_vector_length);
      }
    };

    template<>
    struct SolveTridiagsDefaultModeAndAlgo<Kokkos::Experimental::HIPHostPinnedSpace> {
      typedef KB::Mode::Team mode_type;
      typedef KB::Algo::Level2::Unblocked single_vector_algo_type;
      typedef KB::Algo::Level3::Unblocked multi_vector_algo_type;
      static int recommended_team_size(const int blksize,
                                       const int vector_length,
                                       const int internal_vector_length) {
        return SolveTridiagsRecommendedHIPTeamSize(blksize, vector_length, internal_vector_length);
      }
    };
#endif
    template<typename MatrixType>
    struct SolveTridiags {
    public:
      using impl_type = ImplType<MatrixType>;
      using execution_space = typename impl_type::execution_space;

      using local_ordinal_type = typename impl_type::local_ordinal_type;
      using size_type = typename impl_type::size_type;
      using impl_scalar_type = typename impl_type::impl_scalar_type;
      using magnitude_type = typename impl_type::magnitude_type;
      using btdm_scalar_type = typename impl_type::btdm_scalar_type;
      using btdm_magnitude_type = typename impl_type::btdm_magnitude_type;
      // views
      using local_ordinal_type_1d_view = typename impl_type::local_ordinal_type_1d_view;
      using size_type_1d_view = typename impl_type::size_type_1d_view;
      // vectorization
      using vector_type_3d_view = typename impl_type::vector_type_3d_view;
      using internal_vector_type_4d_view = typename impl_type::internal_vector_type_4d_view;
      using internal_vector_scratch_type_3d_view = Scratch<typename impl_type::internal_vector_type_3d_view>;

      using internal_vector_type = typename impl_type::internal_vector_type;
      static constexpr int vector_length = impl_type::vector_length;
      static constexpr int internal_vector_length = impl_type::internal_vector_length;

      // multivector views
      using impl_scalar_type_1d_view = typename impl_type::impl_scalar_type_1d_view;
      using impl_scalar_type_2d_view_tpetra = typename impl_type::impl_scalar_type_2d_view_tpetra;

      // team policy member type
      using team_policy_type = Kokkos::TeamPolicy<execution_space>;
      using member_type = typename team_policy_type::member_type;
    private:
      // part interface
      const ConstUnmanaged<local_ordinal_type_1d_view> partptr;
      const ConstUnmanaged<local_ordinal_type_1d_view> packptr;
      const ConstUnmanaged<local_ordinal_type_1d_view> part2packrowidx0;
      const ConstUnmanaged<local_ordinal_type_1d_view> lclrow;

      // block tridiags
      const ConstUnmanaged<size_type_1d_view> pack_td_ptr;

      // block tridiags values and packed solution
      const ConstUnmanaged<internal_vector_type_4d_view> D_internal_vector_values;
      const Unmanaged<internal_vector_type_4d_view> X_internal_vector_values;

      const local_ordinal_type vector_loop_size;

      // copy-back to the flat multivector
      Unmanaged<impl_scalar_type_2d_view_tpetra> Y_scalar_multivector;
#if defined(__CUDA_ARCH__)
      AtomicUnmanaged<impl_scalar_type_1d_view> Z_scalar_vector;
#else
      Unmanaged<impl_scalar_type_1d_view> Z_scalar_vector;
#endif
      const impl_scalar_type df;
      const bool compute_diff;

    public:
      SolveTridiags(const PartInterface<MatrixType> &interf,
                    const BlockTridiags<MatrixType> &btdm,
                    const vector_type_3d_view &pmv,
                    const impl_scalar_type damping_factor,
                    const bool is_norm_manager_active)
        :
        // interface
        partptr(interf.partptr),
        packptr(interf.packptr),
        part2packrowidx0(interf.part2packrowidx0),
        lclrow(interf.lclrow),
        // block tridiags and multivector
        pack_td_ptr(btdm.pack_td_ptr),
        D_internal_vector_values((internal_vector_type*)btdm.values.data(),
                                 btdm.values.extent(0),
                                 btdm.values.extent(1),
                                 btdm.values.extent(2),
                                 vector_length/internal_vector_length),
        X_internal_vector_values((internal_vector_type*)pmv.data(),
                                 pmv.extent(0),
                                 pmv.extent(1),
                                 pmv.extent(2),
                                 vector_length/internal_vector_length),
        vector_loop_size(vector_length/internal_vector_length),
        Y_scalar_multivector(),
        Z_scalar_vector(),
        df(damping_factor),
        compute_diff(is_norm_manager_active)
      {}
    public:
      // copy the packed solution back to the flat multivector, applying the
      // damping factor and accumulating the squared size of the update
      KOKKOS_INLINE_FUNCTION
      void
      copyToFlatMultiVector(const member_type &member,
                            const local_ordinal_type partidxbeg, // partidx for v = 0
                            const local_ordinal_type npacks,
                            const local_ordinal_type pri0,
                            const local_ordinal_type v, // index within vector_loop_size
                            const local_ordinal_type blocksize,
                            const local_ordinal_type num_vectors) const {
        const local_ordinal_type vbeg = v*internal_vector_length;
        if (vbeg < npacks) {
          local_ordinal_type ri0_vals[internal_vector_length] = {};
          local_ordinal_type nrows_vals[internal_vector_length] = {};
          for (local_ordinal_type vv=vbeg,vi=0;vv<npacks && vi<internal_vector_length;++vv,++vi) {
            const local_ordinal_type partidx = partidxbeg+vv;
            ri0_vals[vi] = partptr(partidx);
            nrows_vals[vi] = partptr(partidx+1) - ri0_vals[vi];
          }

          impl_scalar_type z_partial_sum(0);
          if (nrows_vals[0] == 1) {
            const local_ordinal_type j=0, pri=pri0;
            for (local_ordinal_type vv=vbeg,vi=0;vv<npacks && vi<internal_vector_length;++vv,++vi) {
              const local_ordinal_type ri0 = ri0_vals[vi];
              const local_ordinal_type nrows = nrows_vals[vi];
              if (j < nrows) {
                Kokkos::parallel_for
                  (Kokkos::TeamThreadRange(member, blocksize),
                   [&](const local_ordinal_type &i) {
                    const local_ordinal_type row = blocksize*lclrow(ri0+j)+i;
                    for (local_ordinal_type col=0;col<num_vectors;++col) {
                      impl_scalar_type &y = Y_scalar_multivector(row,col);
                      const impl_scalar_type yd = X_internal_vector_values(pri, i, col, v)[vi] - y;
                      y += df*yd;

                      const auto yd_abs = Kokkos::ArithTraits<impl_scalar_type>::abs(yd);
                      z_partial_sum += yd_abs*yd_abs;
                    }
                  });
              }
            }
          } else {
            Kokkos::parallel_for
              (Kokkos::TeamThreadRange(member, nrows_vals[0]),
               [&](const local_ordinal_type &j) {
                const local_ordinal_type pri = pri0 + j;
                for (local_ordinal_type vv=vbeg,vi=0;vv<npacks && vi<internal_vector_length;++vv,++vi) {
                  const local_ordinal_type ri0 = ri0_vals[vi];
                  const local_ordinal_type nrows = nrows_vals[vi];
                  if (j < nrows) {
                    for (local_ordinal_type col=0;col<num_vectors;++col) {
                      for (local_ordinal_type i=0;i<blocksize;++i) {
                        const local_ordinal_type row = blocksize*lclrow(ri0+j)+i;
                        impl_scalar_type &y = Y_scalar_multivector(row,col);
                        const impl_scalar_type yd = X_internal_vector_values(pri, i, col, v)[vi] - y;
                        y += df*yd;

                        const auto yd_abs = Kokkos::ArithTraits<impl_scalar_type>::abs(yd);
                        z_partial_sum += yd_abs*yd_abs;
                      }
                    }
                  }
                }
              });
          }
          // accumulate this team's contribution to the norm check
          Z_scalar_vector(member.league_rank()) += z_partial_sum;
        }
      }
      template<typename WWViewType>
      KOKKOS_INLINE_FUNCTION
      void
      solveSingleVector(const member_type &member,
                        const local_ordinal_type &blocksize,
                        const local_ordinal_type &i0,
                        const local_ordinal_type &r0,
                        const local_ordinal_type &nrows,
                        const local_ordinal_type &v,
                        const WWViewType &WW) const {
        typedef SolveTridiagsDefaultModeAndAlgo
          <Kokkos::Impl::ActiveExecutionMemorySpace> default_mode_and_algo_type;
        typedef default_mode_and_algo_type::mode_type default_mode_type;
        typedef default_mode_and_algo_type::single_vector_algo_type default_algo_type;

        // base pointers
        auto A = D_internal_vector_values.data();
        auto X = X_internal_vector_values.data();

        // constants
        const auto one = Kokkos::ArithTraits<btdm_magnitude_type>::one();
        const auto zero = Kokkos::ArithTraits<btdm_magnitude_type>::zero();

        // strides
        const local_ordinal_type astep = D_internal_vector_values.stride_0();
        const local_ordinal_type as0 = D_internal_vector_values.stride_1();
        const local_ordinal_type as1 = D_internal_vector_values.stride_2();
        const local_ordinal_type xstep = X_internal_vector_values.stride_0();
        const local_ordinal_type xs0 = X_internal_vector_values.stride_1();

        // move to the starting point of this pack and vector lane
        A += i0*astep + v;
        X += r0*xstep + v;

        if (nrows > 1) {
          // solve Lx = x
          KOKKOSBATCHED_TRSV_LOWER_NO_TRANSPOSE_INTERNAL_INVOKE
            (default_mode_type,default_algo_type,
             member,
             KB::Diag::Unit,
             blocksize,blocksize,
             one,
             A, as0, as1,
             X, xs0);

          for (local_ordinal_type tr=1;tr<nrows;++tr) {
            member.team_barrier();
            KOKKOSBATCHED_GEMV_NO_TRANSPOSE_INTERNAL_INVOKE
              (default_mode_type,default_algo_type,
               member,
               blocksize, blocksize,
               -one,
               A+2*astep, as0, as1,
               X, xs0,
               one,
               X+1*xstep, xs0);
            KOKKOSBATCHED_TRSV_LOWER_NO_TRANSPOSE_INTERNAL_INVOKE
              (default_mode_type,default_algo_type,
               member,
               KB::Diag::Unit,
               blocksize,blocksize,
               one,
               A+3*astep, as0, as1,
               X+1*xstep, xs0);
            A += 3*astep;
            X += 1*xstep;
          }

          // solve Ux = x
          KOKKOSBATCHED_TRSV_UPPER_NO_TRANSPOSE_INTERNAL_INVOKE
            (default_mode_type,default_algo_type,
             member,
             KB::Diag::NonUnit,
             blocksize, blocksize,
             one,
             A, as0, as1,
             X, xs0);

          for (local_ordinal_type tr=nrows;tr>1;--tr) {
            A -= 3*astep;
            member.team_barrier();
            KOKKOSBATCHED_GEMV_NO_TRANSPOSE_INTERNAL_INVOKE
              (default_mode_type,default_algo_type,
               member,
               blocksize, blocksize,
               -one,
               A+1*astep, as0, as1,
               X, xs0,
               one,
               X-1*xstep, xs0);
            KOKKOSBATCHED_TRSV_UPPER_NO_TRANSPOSE_INTERNAL_INVOKE
              (default_mode_type,default_algo_type,
               member,
               KB::Diag::NonUnit,
               blocksize, blocksize,
               one,
               A, as0, as1,
               X-1*xstep, xs0);
            X -= 1*xstep;
          }
        } else { // nrows == 1: the diagonal block was inverted during factorization
          const local_ordinal_type ws0 = WW.stride_0();
          auto W = WW.data() + v;
          KOKKOSBATCHED_COPY_VECTOR_NO_TRANSPOSE_INTERNAL_INVOKE
            (default_mode_type,
             member, blocksize, X, xs0, W, ws0);
          member.team_barrier();
          KOKKOSBATCHED_GEMV_NO_TRANSPOSE_INTERNAL_INVOKE
            (default_mode_type,default_algo_type,
             member,
             blocksize, blocksize,
             one,
             A, as0, as1,
             W, ws0,
             zero,
             X, xs0);
        }
      }
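      // The two sweeps above are the block Thomas algorithm on the factored
      // tridiagonal: the forward loop eliminates row by row (a GEMV with the
      // coupling block at A+2*astep followed by a unit-lower TRSV), and the
      // backward loop back-substitutes in reverse (a GEMV with the coupling
      // block at A+1*astep followed by a non-unit upper TRSV). For a
      // single-row part the stored block is already an explicit inverse, so
      // one GEMV through the scratch vector W finishes the solve.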
      template<typename WWViewType>
      KOKKOS_INLINE_FUNCTION
      void
      solveMultiVector(const member_type &member,
                       const local_ordinal_type &/* blocksize */,
                       const local_ordinal_type &i0,
                       const local_ordinal_type &r0,
                       const local_ordinal_type &nrows,
                       const local_ordinal_type &v,
                       const WWViewType &WW) const {
        typedef SolveTridiagsDefaultModeAndAlgo
          <Kokkos::Impl::ActiveExecutionMemorySpace> default_mode_and_algo_type;
        typedef default_mode_and_algo_type::mode_type default_mode_type;
        typedef default_mode_and_algo_type::multi_vector_algo_type default_algo_type;

        // constants
        const auto one = Kokkos::ArithTraits<btdm_magnitude_type>::one();
        const auto zero = Kokkos::ArithTraits<btdm_magnitude_type>::zero();

        // subview pattern
        auto A = Kokkos::subview(D_internal_vector_values, i0, Kokkos::ALL(), Kokkos::ALL(), v);
        auto X1 = Kokkos::subview(X_internal_vector_values, r0, Kokkos::ALL(), Kokkos::ALL(), v);
        auto X2 = X1;

        local_ordinal_type i = i0, r = r0;

        if (nrows > 1) {
          // solve Lx = x
          KB::Trsm<member_type,
                   KB::Side::Left,KB::Uplo::Lower,KB::Trans::NoTranspose,KB::Diag::Unit,
                   default_mode_type,default_algo_type>
            ::invoke(member, one, A, X1);
          for (local_ordinal_type tr=1;tr<nrows;++tr,i+=3) {
            A.assign_data( &D_internal_vector_values(i+2,0,0,v) );
            X2.assign_data( &X_internal_vector_values(++r,0,0,v) );
            member.team_barrier();
            KB::Gemm<member_type,
                     KB::Trans::NoTranspose,KB::Trans::NoTranspose,
                     default_mode_type,default_algo_type>
              ::invoke(member, -one, A, X1, one, X2);
            A.assign_data( &D_internal_vector_values(i+3,0,0,v) );
            KB::Trsm<member_type,
                     KB::Side::Left,KB::Uplo::Lower,KB::Trans::NoTranspose,KB::Diag::Unit,
                     default_mode_type,default_algo_type>
              ::invoke(member, one, A, X2);
            X1.assign_data( X2.data() );
          }

          // solve Ux = x
          KB::Trsm<member_type,
                   KB::Side::Left,KB::Uplo::Upper,KB::Trans::NoTranspose,KB::Diag::NonUnit,
                   default_mode_type,default_algo_type>
            ::invoke(member, one, A, X1);
          for (local_ordinal_type tr=nrows;tr>1;--tr) {
            i -= 3;
            A.assign_data( &D_internal_vector_values(i+1,0,0,v) );
            X2.assign_data( &X_internal_vector_values(--r,0,0,v) );
            member.team_barrier();
            KB::Gemm<member_type,
                     KB::Trans::NoTranspose,KB::Trans::NoTranspose,
                     default_mode_type,default_algo_type>
              ::invoke(member, -one, A, X1, one, X2);

            A.assign_data( &D_internal_vector_values(i,0,0,v) );
            KB::Trsm<member_type,
                     KB::Side::Left,KB::Uplo::Upper,KB::Trans::NoTranspose,KB::Diag::NonUnit,
                     default_mode_type,default_algo_type>
              ::invoke(member, one, A, X2);
            X1.assign_data( X2.data() );
          }
        } else {
          // the matrix is already inverted
          auto W = Kokkos::subview(WW, Kokkos::ALL(), Kokkos::ALL(), v);
          KB::Copy<member_type,KB::Trans::NoTranspose,default_mode_type>
            ::invoke(member, X1, W);
          member.team_barrier();
          KB::Gemm<member_type,
                   KB::Trans::NoTranspose,KB::Trans::NoTranspose,
                   default_mode_type,default_algo_type>
            ::invoke(member, one, A, W, zero, X1);
        }
      }
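      // Design note: the multi-vector path uses Trsm/Gemm (Level-3 kernels)
      // on blocksize x num_vectors panels, while the single-vector path above
      // uses the Trsv/Gemv internal invocations; both walk the same factored
      // storage, so the split is purely about arithmetic intensity per block
      // row.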
      template<int B> struct SingleVectorTag {};
      template<int B> struct MultiVectorTag {};

      template<int B>
      KOKKOS_INLINE_FUNCTION
      void
      operator() (const SingleVectorTag<B> &, const member_type &member) const {
        const local_ordinal_type packidx = member.league_rank();
        const local_ordinal_type partidx = packptr(packidx);
        const local_ordinal_type npacks = packptr(packidx+1) - partidx;
        const local_ordinal_type pri0 = part2packrowidx0(partidx);
        const local_ordinal_type i0 = pack_td_ptr(partidx);
        const local_ordinal_type r0 = part2packrowidx0(partidx);
        const local_ordinal_type nrows = partptr(partidx+1) - partptr(partidx);
        const local_ordinal_type blocksize = (B == 0 ? D_internal_vector_values.extent(1) : B);
        const local_ordinal_type num_vectors = 1;
        internal_vector_scratch_type_3d_view
          WW(member.team_scratch(0), blocksize, 1, vector_loop_size);
        Kokkos::single(Kokkos::PerTeam(member), [&]() {
          Z_scalar_vector(member.league_rank()) = impl_scalar_type(0);
        });
        Kokkos::parallel_for
          (Kokkos::ThreadVectorRange(member, vector_loop_size),[&](const int &v) {
            solveSingleVector(member, blocksize, i0, r0, nrows, v, WW);
            copyToFlatMultiVector(member, partidx, npacks, pri0, v, blocksize, num_vectors);
          });
      }
      template<int B>
      KOKKOS_INLINE_FUNCTION
      void
      operator() (const MultiVectorTag<B> &, const member_type &member) const {
        const local_ordinal_type packidx = member.league_rank();
        const local_ordinal_type partidx = packptr(packidx);
        const local_ordinal_type npacks = packptr(packidx+1) - partidx;
        const local_ordinal_type pri0 = part2packrowidx0(partidx);
        const local_ordinal_type i0 = pack_td_ptr(partidx);
        const local_ordinal_type r0 = part2packrowidx0(partidx);
        const local_ordinal_type nrows = partptr(partidx+1) - partptr(partidx);
        const local_ordinal_type blocksize = (B == 0 ? D_internal_vector_values.extent(1) : B);
        const local_ordinal_type num_vectors = X_internal_vector_values.extent(2);

        internal_vector_scratch_type_3d_view
          WW(member.team_scratch(0), blocksize, num_vectors, vector_loop_size);
        Kokkos::single(Kokkos::PerTeam(member), [&]() {
          Z_scalar_vector(member.league_rank()) = impl_scalar_type(0);
        });
        Kokkos::parallel_for
          (Kokkos::ThreadVectorRange(member, vector_loop_size),[&](const int &v) {
            solveMultiVector(member, blocksize, i0, r0, nrows, v, WW);
            copyToFlatMultiVector(member, partidx, npacks, pri0, v, blocksize, num_vectors);
          });
      }
      void run(const impl_scalar_type_2d_view_tpetra &Y,
               const impl_scalar_type_1d_view &Z) {
        IFPACK2_BLOCKTRIDICONTAINER_PROFILER_REGION_BEGIN;
        IFPACK2_BLOCKTRIDICONTAINER_TIMER("BlockTriDi::SolveTridiags");

        // set vectors
        this->Y_scalar_multivector = Y;
        this->Z_scalar_vector = Z;

        const local_ordinal_type num_vectors = X_internal_vector_values.extent(2);
        const local_ordinal_type blocksize = D_internal_vector_values.extent(1);

        const local_ordinal_type team_size =
          SolveTridiagsDefaultModeAndAlgo<typename execution_space::memory_space>::
          recommended_team_size(blocksize, vector_length, internal_vector_length);
        const int per_team_scratch = internal_vector_scratch_type_3d_view
          ::shmem_size(blocksize, num_vectors, vector_loop_size);

#if defined(KOKKOS_ENABLE_DEPRECATED_CODE)
#define BLOCKTRIDICONTAINER_DETAILS_SOLVETRIDIAGS(B)                    \
        if (num_vectors == 1) {                                         \
          const Kokkos::TeamPolicy<execution_space,SingleVectorTag<B> > \
            policy(packptr.extent(0) - 1, team_size, vector_loop_size); \
          Kokkos::parallel_for                                          \
            ("SolveTridiags::TeamPolicy::run<SingleVector>",            \
             policy.set_scratch_size(0,Kokkos::PerTeam(per_team_scratch)), *this); \
        } else {                                                        \
          const Kokkos::TeamPolicy<execution_space,MultiVectorTag<B> >  \
            policy(packptr.extent(0) - 1, team_size, vector_loop_size); \
          Kokkos::parallel_for                                          \
            ("SolveTridiags::TeamPolicy::run<MultiVector>",             \
             policy.set_scratch_size(0,Kokkos::PerTeam(per_team_scratch)), *this); \
        } break
#else
#define BLOCKTRIDICONTAINER_DETAILS_SOLVETRIDIAGS(B)                    \
        if (num_vectors == 1) {                                         \
          Kokkos::TeamPolicy<execution_space,SingleVectorTag<B> >       \
            policy(packptr.extent(0) - 1, team_size, vector_loop_size); \
          policy.set_scratch_size(0,Kokkos::PerTeam(per_team_scratch)); \
          Kokkos::parallel_for                                          \
            ("SolveTridiags::TeamPolicy::run<SingleVector>",            \
             policy, *this);                                            \
        } else {                                                        \
          Kokkos::TeamPolicy<execution_space,MultiVectorTag<B> >        \
            policy(packptr.extent(0) - 1, team_size, vector_loop_size); \
          policy.set_scratch_size(0,Kokkos::PerTeam(per_team_scratch)); \
          Kokkos::parallel_for                                          \
            ("SolveTridiags::TeamPolicy::run<MultiVector>",             \
             policy, *this);                                            \
        } break
#endif
        switch (blocksize) {
        case   3: BLOCKTRIDICONTAINER_DETAILS_SOLVETRIDIAGS( 3);
        case   5: BLOCKTRIDICONTAINER_DETAILS_SOLVETRIDIAGS( 5);
        case   7: BLOCKTRIDICONTAINER_DETAILS_SOLVETRIDIAGS( 7);
        case   9: BLOCKTRIDICONTAINER_DETAILS_SOLVETRIDIAGS( 9);
        case  10: BLOCKTRIDICONTAINER_DETAILS_SOLVETRIDIAGS(10);
        case  11: BLOCKTRIDICONTAINER_DETAILS_SOLVETRIDIAGS(11);
        case  16: BLOCKTRIDICONTAINER_DETAILS_SOLVETRIDIAGS(16);
        case  17: BLOCKTRIDICONTAINER_DETAILS_SOLVETRIDIAGS(17);
        case  18: BLOCKTRIDICONTAINER_DETAILS_SOLVETRIDIAGS(18);
        default : BLOCKTRIDICONTAINER_DETAILS_SOLVETRIDIAGS( 0);
        }
#undef BLOCKTRIDICONTAINER_DETAILS_SOLVETRIDIAGS

        IFPACK2_BLOCKTRIDICONTAINER_PROFILER_REGION_END;
      }
    };
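    // Dispatch note: the switch in SolveTridiags::run instantiates the solve
    // kernels for a handful of common block sizes (3, 5, 7, 9, 10, 11, 16,
    // 17, 18) so the compiler can fully unroll the blocksize loops; B = 0 is
    // the generic fallback that reads the block size from the view extents at
    // run time.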
    ///
    /// compute the residual vector, y = b - R x
    ///
    static inline int ComputeResidualVectorRecommendedCudaVectorSize(const int blksize,
                                                                     const int team_size) {
      int total_team_size(0);
      if      (blksize <=  5) total_team_size =  32;
      else if (blksize <=  9) total_team_size =  32;
      else if (blksize <= 12) total_team_size =  96;
      else if (blksize <= 16) total_team_size = 128;
      else if (blksize <= 20) total_team_size = 160;
      else                    total_team_size = 160;
      return total_team_size/team_size;
    }

    static inline int ComputeResidualVectorRecommendedHIPVectorSize(const int blksize,
                                                                    const int team_size) {
      int total_team_size(0);
      if      (blksize <=  5) total_team_size =  32;
      else if (blksize <=  9) total_team_size =  32;
      else if (blksize <= 12) total_team_size =  96;
      else if (blksize <= 16) total_team_size = 128;
      else if (blksize <= 20) total_team_size = 160;
      else                    total_team_size = 160;
      return total_team_size/team_size;
    }
    template<typename MatrixType>
    struct ComputeResidualVector {
    public:
      using impl_type = ImplType<MatrixType>;
      using execution_space = typename impl_type::execution_space;
      using memory_space = typename impl_type::memory_space;

      using local_ordinal_type = typename impl_type::local_ordinal_type;
      using size_type = typename impl_type::size_type;
      using impl_scalar_type = typename impl_type::impl_scalar_type;
      using magnitude_type = typename impl_type::magnitude_type;
      using btdm_scalar_type = typename impl_type::btdm_scalar_type;
      using btdm_magnitude_type = typename impl_type::btdm_magnitude_type;
      // views
      using local_ordinal_type_1d_view = typename impl_type::local_ordinal_type_1d_view;
      using size_type_1d_view = typename impl_type::size_type_1d_view;
      using tpetra_block_access_view_type = typename impl_type::tpetra_block_access_view_type;
      using impl_scalar_type_1d_view = typename impl_type::impl_scalar_type_1d_view;
      using impl_scalar_type_2d_view_tpetra = typename impl_type::impl_scalar_type_2d_view_tpetra;
      using vector_type_3d_view = typename impl_type::vector_type_3d_view;
      using btdm_scalar_type_4d_view = typename impl_type::btdm_scalar_type_4d_view;
      using node_device_type = typename impl_type::node_device_type;
      static constexpr int vector_length = impl_type::vector_length;

      // team policy member type
      using member_type = typename Kokkos::TeamPolicy<execution_space>::member_type;

      // enum for the stack buffer size used in the range-policy kernels
      enum : int { max_blocksize = 32 };
    private:
      ConstUnmanaged<impl_scalar_type_2d_view_tpetra> b;
      ConstUnmanaged<impl_scalar_type_2d_view_tpetra> x; // x_owned
      ConstUnmanaged<impl_scalar_type_2d_view_tpetra> x_remote;
      Unmanaged<impl_scalar_type_2d_view_tpetra> y;
      Unmanaged<vector_type_3d_view> y_packed;
      Unmanaged<btdm_scalar_type_4d_view> y_packed_scalar;

      // AmD information
      const ConstUnmanaged<size_type_1d_view> rowptr, rowptr_remote;
      const ConstUnmanaged<local_ordinal_type_1d_view> colindsub, colindsub_remote;
      const ConstUnmanaged<impl_scalar_type_1d_view> tpetra_values;

      // block crs graph information
      const ConstUnmanaged<Kokkos::View<size_t*,node_device_type> > A_rowptr;
      const ConstUnmanaged<Kokkos::View<local_ordinal_type*,node_device_type> > A_colind;

      // blocksize
      const local_ordinal_type blocksize_requested;

      // part interface
      const ConstUnmanaged<local_ordinal_type_1d_view> part2packrowidx0;
      const ConstUnmanaged<local_ordinal_type_1d_view> part2rowidx0;
      const ConstUnmanaged<local_ordinal_type_1d_view> rowidx2part;
      const ConstUnmanaged<local_ordinal_type_1d_view> partptr;
      const ConstUnmanaged<local_ordinal_type_1d_view> lclrow;
      const ConstUnmanaged<local_ordinal_type_1d_view> dm2cm;
      const bool is_dm2cm_active;
    public:
      template<typename LocalCrsGraphType>
      ComputeResidualVector(const AmD<MatrixType> &amd,
                            const LocalCrsGraphType &graph,
                            const local_ordinal_type &blocksize_requested_,
                            const PartInterface<MatrixType> &interf,
                            const local_ordinal_type_1d_view &dm2cm_)
        : rowptr(amd.rowptr), rowptr_remote(amd.rowptr_remote),
          colindsub(amd.A_colindsub), colindsub_remote(amd.A_colindsub_remote),
          tpetra_values(amd.tpetra_values),
          A_rowptr(graph.row_map),
          A_colind(graph.entries),
          blocksize_requested(blocksize_requested_),
          part2packrowidx0(interf.part2packrowidx0),
          part2rowidx0(interf.part2rowidx0),
          rowidx2part(interf.rowidx2part),
          partptr(interf.partptr),
          lclrow(interf.lclrow),
          dm2cm(dm2cm_),
          is_dm2cm_active(dm2cm_.span() > 0)
      {}
      KOKKOS_INLINE_FUNCTION
      void
      SerialGemv(const local_ordinal_type &blocksize,
                 const impl_scalar_type * const KOKKOS_RESTRICT AA,
                 const impl_scalar_type * const KOKKOS_RESTRICT xx,
                 /* */ impl_scalar_type * KOKKOS_RESTRICT yy) const {
        for (local_ordinal_type k0=0;k0<blocksize;++k0) {
          impl_scalar_type val = 0;
          const local_ordinal_type offset = k0*blocksize;
#if defined(KOKKOS_ENABLE_PRAGMA_IVDEP)
#pragma ivdep
#endif
#if defined(KOKKOS_ENABLE_PRAGMA_UNROLL)
#pragma unroll
#endif
          for (local_ordinal_type k1=0;k1<blocksize;++k1)
            val += AA[offset+k1]*xx[k1];
          yy[k0] -= val;
        }
      }
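      // SerialGemv computes yy -= AA*xx for one blocksize x blocksize block
      // stored row-major; the residual kernels below call it once per block
      // entry of a row, so after the loop over blocks yy holds b - R x for
      // that block row.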
      template<typename bbViewType, typename yyViewType>
      KOKKOS_INLINE_FUNCTION
      void
      VectorCopy(const member_type &member,
                 const local_ordinal_type &blocksize,
                 const bbViewType &bb,
                 const yyViewType &yy) const {
        Kokkos::parallel_for(Kokkos::ThreadVectorRange(member, blocksize), [&](const local_ordinal_type &k0) {
          yy(k0) = static_cast<typename yyViewType::const_value_type>(bb(k0));
        });
      }
      template<typename AAViewType, typename xxViewType, typename yyViewType>
      KOKKOS_INLINE_FUNCTION
      void
      TeamVectorGemv(const member_type &member,
                     const local_ordinal_type &blocksize,
                     const AAViewType &AA,
                     const xxViewType &xx,
                     const yyViewType &yy) const {
        Kokkos::parallel_for
          (Kokkos::TeamThreadRange(member, blocksize),
           [&](const local_ordinal_type &k0) {
            impl_scalar_type val = 0;
            Kokkos::parallel_for
              (Kokkos::ThreadVectorRange(member, blocksize),
               [&](const local_ordinal_type &k1) {
                val += AA(k0,k1)*xx(k1);
              });
            Kokkos::atomic_fetch_add(&yy(k0), typename yyViewType::const_value_type(-val));
          });
      }
      template<typename AAViewType, typename xxViewType, typename yyViewType>
      KOKKOS_INLINE_FUNCTION
      void
      VectorGemv(const member_type &member,
                 const local_ordinal_type &blocksize,
                 const AAViewType &AA,
                 const xxViewType &xx,
                 const yyViewType &yy) const {
        Kokkos::parallel_for
          (Kokkos::ThreadVectorRange(member, blocksize),
           [&](const local_ordinal_type &k0) {
            impl_scalar_type val(0);
            for (local_ordinal_type k1=0;k1<blocksize;++k1) {
              val += AA(k0,k1)*xx(k1);
            }
            Kokkos::atomic_fetch_add(&yy(k0), typename yyViewType::const_value_type(-val));
          });
      }
      struct SeqTag {};

      KOKKOS_INLINE_FUNCTION
      void
      operator() (const SeqTag &, const local_ordinal_type& i) const {
        const local_ordinal_type blocksize = blocksize_requested;
        const local_ordinal_type blocksize_square = blocksize*blocksize;

        // constants
        const Kokkos::pair<local_ordinal_type,local_ordinal_type> block_range(0, blocksize);
        const local_ordinal_type num_vectors = y.extent(1);
        const local_ordinal_type row = i*blocksize;
        for (local_ordinal_type col=0;col<num_vectors;++col) {
          // y := b
          impl_scalar_type *yy = &y(row, col);
          const impl_scalar_type* const bb = &b(row, col);
          memcpy(yy, bb, sizeof(impl_scalar_type)*blocksize);

          // y -= Rx
          const size_type A_k0 = A_rowptr[i];
          for (size_type k=rowptr[i];k<rowptr[i+1];++k) {
            const size_type j = A_k0 + colindsub[k];
            const impl_scalar_type* const AA = &tpetra_values(j*blocksize_square);
            const impl_scalar_type* const xx = &x(A_colind[j]*blocksize, col);
            SerialGemv(blocksize,AA,xx,yy);
          }
        }
      }
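      // Per block row i this computes y = b - R x, where R is the part of A
      // selected by rowptr/colindsub (the remainder after removing the block
      // tridiagonal): copy b into y, then subtract one block GEMV per
      // selected column block.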
      KOKKOS_INLINE_FUNCTION
      void
      operator() (const SeqTag &, const member_type &member) const {
        // constants
        const local_ordinal_type blocksize = blocksize_requested;
        const local_ordinal_type blocksize_square = blocksize*blocksize;

        const local_ordinal_type lr = member.league_rank();
        const Kokkos::pair<local_ordinal_type,local_ordinal_type> block_range(0, blocksize);
        const local_ordinal_type num_vectors = y.extent(1);

        // subview pattern
        auto bb = Kokkos::subview(b, block_range, 0);
        auto xx = Kokkos::subview(x, block_range, 0);
        auto yy = Kokkos::subview(y, block_range, 0);
        auto A_block = ConstUnmanaged<tpetra_block_access_view_type>(NULL, blocksize, blocksize);

        const local_ordinal_type row = lr*blocksize;
        for (local_ordinal_type col=0;col<num_vectors;++col) {
          // y := b
          yy.assign_data(&y(row, col));
          bb.assign_data(&b(row, col));
          if (member.team_rank() == 0)
            VectorCopy(member, blocksize, bb, yy);
          member.team_barrier();

          // y -= Rx
          const size_type A_k0 = A_rowptr[lr];
          Kokkos::parallel_for
            (Kokkos::TeamThreadRange(member, rowptr[lr], rowptr[lr+1]),
             [&](const local_ordinal_type &k) {
              const size_type j = A_k0 + colindsub[k];
              A_block.assign_data( &tpetra_values(j*blocksize_square) );
              xx.assign_data( &x(A_colind[j]*blocksize, col) );
              VectorGemv(member, blocksize, A_block, xx, yy);
            });
        }
      }
      template<int B> struct AsyncTag {};

      template<int B>
      KOKKOS_INLINE_FUNCTION
      void
      operator() (const AsyncTag<B> &, const local_ordinal_type &rowidx) const {
        const local_ordinal_type blocksize = (B == 0 ? blocksize_requested : B);
        const local_ordinal_type blocksize_square = blocksize*blocksize;

        // constants
        const local_ordinal_type partidx = rowidx2part(rowidx);
        const local_ordinal_type pri = part2packrowidx0(partidx) + (rowidx - partptr(partidx));
        const local_ordinal_type v = partidx % vector_length;

        const local_ordinal_type num_vectors = y_packed.extent(2);
        const local_ordinal_type num_local_rows = lclrow.extent(0);

        // temporary buffer for y flat
        impl_scalar_type yy[B == 0 ? max_blocksize : B] = {};

        const local_ordinal_type lr = lclrow(rowidx);
        const local_ordinal_type row = lr*blocksize;
        for (local_ordinal_type col=0;col<num_vectors;++col) {
          // y := b
          memcpy(yy, &b(row, col), sizeof(impl_scalar_type)*blocksize);

          // y -= Rx
          const size_type A_k0 = A_rowptr[lr];
          for (size_type k=rowptr[lr];k<rowptr[lr+1];++k) {
            const size_type j = A_k0 + colindsub[k];
            const impl_scalar_type* const AA = &tpetra_values(j*blocksize_square);
            const local_ordinal_type A_colind_at_j = A_colind[j];
            if (A_colind_at_j < num_local_rows) {
              const auto loc = is_dm2cm_active ? dm2cm[A_colind_at_j] : A_colind_at_j;
              const impl_scalar_type* const xx = &x(loc*blocksize, col);
              SerialGemv(blocksize, AA, xx, yy);
            } else {
              const auto loc = A_colind_at_j - num_local_rows;
              const impl_scalar_type* const xx_remote = &x_remote(loc*blocksize, col);
              SerialGemv(blocksize, AA, xx_remote, yy);
            }
          }
          // move yy to y_packed
          for (local_ordinal_type k=0;k<blocksize;++k)
            y_packed(pri, k, col)[v] = yy[k];
        }
      }
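      // AsyncTag splits each matrix row by column ownership: column indices
      // below num_local_rows read the owned multivector x (through dm2cm when
      // the domain and column maps differ), the rest read x_remote, which is
      // filled by the asynchronous import. The result is written directly
      // into the packed layout consumed by the tridiagonal solve.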
      template<int B>
      KOKKOS_INLINE_FUNCTION
      void
      operator() (const AsyncTag<B> &, const member_type &member) const {
        const local_ordinal_type blocksize = (B == 0 ? blocksize_requested : B);
        const local_ordinal_type blocksize_square = blocksize*blocksize;

        // constants
        const local_ordinal_type rowidx = member.league_rank();
        const local_ordinal_type partidx = rowidx2part(rowidx);
        const local_ordinal_type pri = part2packrowidx0(partidx) + (rowidx - partptr(partidx));
        const local_ordinal_type v = partidx % vector_length;

        const Kokkos::pair<local_ordinal_type,local_ordinal_type> block_range(0, blocksize);
        const local_ordinal_type num_vectors = y_packed_scalar.extent(2);
        const local_ordinal_type num_local_rows = lclrow.extent(0);

        // subview pattern
        auto bb = Kokkos::subview(b, block_range, 0);
        auto xx = Kokkos::subview(x, block_range, 0);
        auto xx_remote = Kokkos::subview(x_remote, block_range, 0);
        auto yy = Kokkos::subview(y_packed_scalar, 0, block_range, 0, 0);
        auto A_block = ConstUnmanaged<tpetra_block_access_view_type>(NULL, blocksize, blocksize);

        const local_ordinal_type lr = lclrow(rowidx);
        const local_ordinal_type row = lr*blocksize;
        for (local_ordinal_type col=0;col<num_vectors;++col) {
          // y := b
          bb.assign_data(&b(row, col));
          yy.assign_data(&y_packed_scalar(pri, 0, col, v));
          if (member.team_rank() == 0)
            VectorCopy(member, blocksize, bb, yy);
          member.team_barrier();

          // y -= Rx
          const size_type A_k0 = A_rowptr[lr];
          Kokkos::parallel_for
            (Kokkos::TeamThreadRange(member, rowptr[lr], rowptr[lr+1]),
             [&](const local_ordinal_type &k) {
              const size_type j = A_k0 + colindsub[k];
              A_block.assign_data( &tpetra_values(j*blocksize_square) );

              const local_ordinal_type A_colind_at_j = A_colind[j];
              if (A_colind_at_j < num_local_rows) {
                const auto loc = is_dm2cm_active ? dm2cm[A_colind_at_j] : A_colind_at_j;
                xx.assign_data( &x(loc*blocksize, col) );
                VectorGemv(member, blocksize, A_block, xx, yy);
              } else {
                const auto loc = A_colind_at_j - num_local_rows;
                xx_remote.assign_data( &x_remote(loc*blocksize, col) );
                VectorGemv(member, blocksize, A_block, xx_remote, yy);
              }
            });
        }
      }
      template <int P, int B> struct OverlapTag {};

      template<int P, int B>
      KOKKOS_INLINE_FUNCTION
      void
      operator() (const OverlapTag<P,B> &, const local_ordinal_type& rowidx) const {
        const local_ordinal_type blocksize = (B == 0 ? blocksize_requested : B);
        const local_ordinal_type blocksize_square = blocksize*blocksize;

        // constants
        const local_ordinal_type partidx = rowidx2part(rowidx);
        const local_ordinal_type pri = part2packrowidx0(partidx) + (rowidx - partptr(partidx));
        const local_ordinal_type v = partidx % vector_length;

        const local_ordinal_type num_vectors = y_packed.extent(2);
        const local_ordinal_type num_local_rows = lclrow.extent(0);

        // temporary buffer for y flat
        impl_scalar_type yy[max_blocksize] = {};

        auto colindsub_used = (P == 0 ? colindsub : colindsub_remote);
        auto rowptr_used = (P == 0 ? rowptr : rowptr_remote);

        const local_ordinal_type lr = lclrow(rowidx);
        const local_ordinal_type row = lr*blocksize;
        for (local_ordinal_type col=0;col<num_vectors;++col) {
          if (P == 0) {
            // y := b
            memcpy(yy, &b(row, col), sizeof(impl_scalar_type)*blocksize);
          } else {
            // y (temporary) := 0
            memset(yy, 0, sizeof(impl_scalar_type)*blocksize);
          }

          // y -= Rx
          const size_type A_k0 = A_rowptr[lr];
          for (size_type k=rowptr_used[lr];k<rowptr_used[lr+1];++k) {
            const size_type j = A_k0 + colindsub_used[k];
            const impl_scalar_type* const AA = &tpetra_values(j*blocksize_square);
            const local_ordinal_type A_colind_at_j = A_colind[j];
            if (P == 0) {
              const auto loc = is_dm2cm_active ? dm2cm[A_colind_at_j] : A_colind_at_j;
              const impl_scalar_type* const xx = &x(loc*blocksize, col);
              SerialGemv(blocksize,AA,xx,yy);
            } else {
              const auto loc = A_colind_at_j - num_local_rows;
              const impl_scalar_type* const xx_remote = &x_remote(loc*blocksize, col);
              SerialGemv(blocksize,AA,xx_remote,yy);
            }
          }
          // move yy to y_packed
          if (P == 0) {
            for (local_ordinal_type k=0;k<blocksize;++k)
              y_packed(pri, k, col)[v] = yy[k];
          } else {
            for (local_ordinal_type k=0;k<blocksize;++k)
              y_packed(pri, k, col)[v] += yy[k];
          }
        }
      }
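      // OverlapTag runs the same residual in two phases so communication can
      // overlap computation: P = 0 processes owned columns while the halo
      // exchange is in flight (y := b - R_owned x), and P = 1 accumulates the
      // remote contribution afterwards (y -= R_remote x_remote), which is why
      // the second phase starts from a zeroed buffer and adds into y_packed.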
      template<int P, int B>
      KOKKOS_INLINE_FUNCTION
      void
      operator() (const OverlapTag<P,B> &, const member_type &member) const {
        const local_ordinal_type blocksize = (B == 0 ? blocksize_requested : B);
        const local_ordinal_type blocksize_square = blocksize*blocksize;

        // constants
        const local_ordinal_type rowidx = member.league_rank();
        const local_ordinal_type partidx = rowidx2part(rowidx);
        const local_ordinal_type pri = part2packrowidx0(partidx) + (rowidx - partptr(partidx));
        const local_ordinal_type v = partidx % vector_length;

        const Kokkos::pair<local_ordinal_type,local_ordinal_type> block_range(0, blocksize);
        const local_ordinal_type num_vectors = y_packed_scalar.extent(2);
        const local_ordinal_type num_local_rows = lclrow.extent(0);

        // subview pattern
        auto bb = Kokkos::subview(b, block_range, 0);
        auto xx = bb;
        auto xx_remote = bb;
        auto yy = Kokkos::subview(y_packed_scalar, 0, block_range, 0, 0);
        auto A_block = ConstUnmanaged<tpetra_block_access_view_type>(NULL, blocksize, blocksize);
        auto colindsub_used = (P == 0 ? colindsub : colindsub_remote);
        auto rowptr_used = (P == 0 ? rowptr : rowptr_remote);

        const local_ordinal_type lr = lclrow(rowidx);
        const local_ordinal_type row = lr*blocksize;
        for (local_ordinal_type col=0;col<num_vectors;++col) {
          yy.assign_data(&y_packed_scalar(pri, 0, col, v));
          if (P == 0) {
            // y := b
            bb.assign_data(&b(row, col));
            if (member.team_rank() == 0)
              VectorCopy(member, blocksize, bb, yy);
            member.team_barrier();
          }

          // y -= Rx
          const size_type A_k0 = A_rowptr[lr];
          Kokkos::parallel_for
            (Kokkos::TeamThreadRange(member, rowptr_used[lr], rowptr_used[lr+1]),
             [&](const local_ordinal_type &k) {
              const size_type j = A_k0 + colindsub_used[k];
              A_block.assign_data( &tpetra_values(j*blocksize_square) );

              const local_ordinal_type A_colind_at_j = A_colind[j];
              if (P == 0) {
                const auto loc = is_dm2cm_active ? dm2cm[A_colind_at_j] : A_colind_at_j;
                xx.assign_data( &x(loc*blocksize, col) );
                VectorGemv(member, blocksize, A_block, xx, yy);
              } else {
                const auto loc = A_colind_at_j - num_local_rows;
                xx_remote.assign_data( &x_remote(loc*blocksize, col) );
                VectorGemv(member, blocksize, A_block, xx_remote, yy);
              }
            });
        }
      }
      // y = b - Rx; seq method
      template<typename MultiVectorLocalViewTypeY,
               typename MultiVectorLocalViewTypeB,
               typename MultiVectorLocalViewTypeX>
      void run(const MultiVectorLocalViewTypeY &y_,
               const MultiVectorLocalViewTypeB &b_,
               const MultiVectorLocalViewTypeX &x_) {
        IFPACK2_BLOCKTRIDICONTAINER_PROFILER_REGION_BEGIN;
        IFPACK2_BLOCKTRIDICONTAINER_TIMER("BlockTriDi::ComputeResidual::<SeqTag>");

        y = y_; b = b_; x = x_;
        if (is_cuda<execution_space>::value) {
#if defined(KOKKOS_ENABLE_CUDA)
          const local_ordinal_type blocksize = blocksize_requested;
          const local_ordinal_type team_size = 8;
          const local_ordinal_type vector_size = ComputeResidualVectorRecommendedCudaVectorSize(blocksize, team_size);
          const Kokkos::TeamPolicy<execution_space,SeqTag> policy(rowptr.extent(0) - 1, team_size, vector_size);
          Kokkos::parallel_for
            ("ComputeResidual::TeamPolicy::run<SeqTag>", policy, *this);
#endif
        } else if (is_hip<execution_space>::value) {
#if defined(KOKKOS_ENABLE_HIP)
          const local_ordinal_type blocksize = blocksize_requested;
          const local_ordinal_type team_size = 8;
          const local_ordinal_type vector_size = ComputeResidualVectorRecommendedHIPVectorSize(blocksize, team_size);
          const Kokkos::TeamPolicy<execution_space,SeqTag> policy(rowptr.extent(0) - 1, team_size, vector_size);
          Kokkos::parallel_for
            ("ComputeResidual::TeamPolicy::run<SeqTag>", policy, *this);
#endif
        } else {
#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
          TEUCHOS_TEST_FOR_EXCEPT_MSG(true, "Error: device compiler should not see this code");
#else
          const Kokkos::RangePolicy<execution_space,SeqTag> policy(0, rowptr.extent(0) - 1);
          Kokkos::parallel_for
            ("ComputeResidual::RangePolicy::run<SeqTag>", policy, *this);
#endif
        }
        IFPACK2_BLOCKTRIDICONTAINER_PROFILER_REGION_END;
      }
3565 template<
typename MultiVectorLocalViewTypeB,
3566 typename MultiVectorLocalViewTypeX,
3567 typename MultiVectorLocalViewTypeX_Remote>
3568 void run(
const vector_type_3d_view &y_packed_,
3569 const MultiVectorLocalViewTypeB &b_,
3570 const MultiVectorLocalViewTypeX &x_,
3571 const MultiVectorLocalViewTypeX_Remote &x_remote_) {
3572 IFPACK2_BLOCKTRIDICONTAINER_PROFILER_REGION_BEGIN;
3573 IFPACK2_BLOCKTRIDICONTAINER_TIMER(
"BlockTriDi::ComputeResidual::<AsyncTag>");
3575 b = b_; x = x_; x_remote = x_remote_;
3576 if (is_cuda<execution_space>::value) {
3577 #if defined(KOKKOS_ENABLE_CUDA) 3578 y_packed_scalar = btdm_scalar_type_4d_view((btdm_scalar_type*)y_packed_.data(),
3579 y_packed_.extent(0),
3580 y_packed_.extent(1),
3581 y_packed_.extent(2),
3584 }
else if (is_hip<execution_space>::value) {
3585 #if defined(KOKKOS_ENABLE_HIP) 3586 y_packed_scalar = btdm_scalar_type_4d_view((btdm_scalar_type*)y_packed_.data(),
3587 y_packed_.extent(0),
3588 y_packed_.extent(1),
3589 y_packed_.extent(2),
3593 y_packed = y_packed_;
3596 if (is_cuda<execution_space>::value) {
3597 #if defined(KOKKOS_ENABLE_CUDA) 3598 const local_ordinal_type blocksize = blocksize_requested;
3599 const local_ordinal_type team_size = 8;
3605 #define BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(B) { \ 3606 const Kokkos::TeamPolicy<execution_space,AsyncTag<B> > \ 3607 policy(rowidx2part.extent(0), team_size, vector_size); \ 3608 Kokkos::parallel_for \ 3609 ("ComputeResidual::TeamPolicy::run<AsyncTag>", \ 3610 policy, *this); } break 3611 switch (blocksize_requested) {
3612 case 3: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL( 3);
3613 case 5: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL( 5);
3614 case 7: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL( 7);
3615 case 9: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL( 9);
3616 case 10: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(10);
3617 case 11: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(11);
3618 case 16: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(16);
3619 case 17: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(17);
3620 case 18: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(18);
3621 default : BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL( 0);
3623 #undef BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL 3625 }
    } else if (is_hip<execution_space>::value) {
#if defined(KOKKOS_ENABLE_HIP)
      const local_ordinal_type blocksize = blocksize_requested;
      const local_ordinal_type team_size = 8;
      const local_ordinal_type vector_size = ComputeResidualVectorRecommendedHIPVectorSize(blocksize, team_size);
#define BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(B) {                \
      const Kokkos::TeamPolicy<execution_space,AsyncTag<B> >            \
        policy(rowidx2part.extent(0), team_size, vector_size);          \
      Kokkos::parallel_for                                              \
        ("ComputeResidual::TeamPolicy::run<AsyncTag>",                  \
         policy, *this); } break
      switch (blocksize_requested) {
      case   3: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL( 3);
      case   5: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL( 5);
      case   7: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL( 7);
      case   9: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL( 9);
      case  10: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(10);
      case  11: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(11);
      case  16: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(16);
      case  17: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(17);
      case  18: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(18);
      default : BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL( 0);
      }
#undef BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL
#endif
    } else {
#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
      TEUCHOS_TEST_FOR_EXCEPT_MSG(true, "Error: device compiler should not see this code");
#else
#define BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(B) {                \
      const Kokkos::RangePolicy<execution_space,AsyncTag<B> > policy(0, rowidx2part.extent(0)); \
      Kokkos::parallel_for                                              \
        ("ComputeResidual::RangePolicy::run<AsyncTag>",                 \
         policy, *this); } break
      switch (blocksize_requested) {
      case   3: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL( 3);
      case   5: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL( 5);
      case   7: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL( 7);
      case   9: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL( 9);
      case  10: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(10);
      case  11: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(11);
      case  16: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(16);
      case  17: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(17);
      case  18: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(18);
      default : BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL( 0);
      }
#undef BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL
#endif
    }
    IFPACK2_BLOCKTRIDICONTAINER_PROFILER_REGION_END;
  }
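
  // Note on the switch/macro pattern above: AsyncTag<B> carries the block size
  // as a compile-time constant for the common sizes (3, 5, 7, ...), letting the
  // kernels specialize their inner blocksize loops; AsyncTag<0> is the generic
  // fallback that uses the runtime blocksize. A stripped-down sketch of the
  // same dispatch (illustrative only; `launch` is a hypothetical helper):
  //
  //   switch (blocksize_requested) {
  //   case  5: launch<AsyncTag< 5> >(); break; // compile-time blocksize
  //   default: launch<AsyncTag< 0> >(); break; // runtime blocksize
  //   }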
  template<typename MultiVectorLocalViewTypeB,
           typename MultiVectorLocalViewTypeX,
           typename MultiVectorLocalViewTypeX_Remote>
  void run(const vector_type_3d_view &y_packed_,
           const MultiVectorLocalViewTypeB &b_,
           const MultiVectorLocalViewTypeX &x_,
           const MultiVectorLocalViewTypeX_Remote &x_remote_,
           const bool compute_owned) {
    IFPACK2_BLOCKTRIDICONTAINER_PROFILER_REGION_BEGIN;
    IFPACK2_BLOCKTRIDICONTAINER_TIMER("BlockTriDi::ComputeResidual::<OverlapTag>");

    b = b_; x = x_; x_remote = x_remote_;
    if (is_cuda<execution_space>::value) {
#if defined(KOKKOS_ENABLE_CUDA)
      y_packed_scalar = btdm_scalar_type_4d_view((btdm_scalar_type*)y_packed_.data(),
                                                 y_packed_.extent(0),
                                                 y_packed_.extent(1),
                                                 y_packed_.extent(2),
                                                 y_packed_.extent(3)*vector_length);
#endif
    } else if (is_hip<execution_space>::value) {
#if defined(KOKKOS_ENABLE_HIP)
      y_packed_scalar = btdm_scalar_type_4d_view((btdm_scalar_type*)y_packed_.data(),
                                                 y_packed_.extent(0),
                                                 y_packed_.extent(1),
                                                 y_packed_.extent(2),
                                                 y_packed_.extent(3)*vector_length);
#endif
    } else {
      y_packed = y_packed_;
    }
    if (is_cuda<execution_space>::value) {
#if defined(KOKKOS_ENABLE_CUDA)
      const local_ordinal_type blocksize = blocksize_requested;
      const local_ordinal_type team_size = 8;
      const local_ordinal_type vector_size = ComputeResidualVectorRecommendedCudaVectorSize(blocksize, team_size);
#define BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(B)                  \
      if (compute_owned) {                                              \
        const Kokkos::TeamPolicy<execution_space,OverlapTag<0,B> >      \
          policy(rowidx2part.extent(0), team_size, vector_size);        \
        Kokkos::parallel_for                                            \
          ("ComputeResidual::TeamPolicy::run<OverlapTag<0> >", policy, *this); \
      } else {                                                          \
        const Kokkos::TeamPolicy<execution_space,OverlapTag<1,B> >      \
          policy(rowidx2part.extent(0), team_size, vector_size);        \
        Kokkos::parallel_for                                            \
          ("ComputeResidual::TeamPolicy::run<OverlapTag<1> >", policy, *this); \
      } break
      switch (blocksize_requested) {
      case   3: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL( 3);
      case   5: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL( 5);
      case   7: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL( 7);
      case   9: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL( 9);
      case  10: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(10);
      case  11: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(11);
      case  16: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(16);
      case  17: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(17);
      case  18: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(18);
      default : BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL( 0);
      }
#undef BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL
#endif
    } else if (is_hip<execution_space>::value) {
#if defined(KOKKOS_ENABLE_HIP)
      const local_ordinal_type blocksize = blocksize_requested;
      const local_ordinal_type team_size = 8;
      const local_ordinal_type vector_size = ComputeResidualVectorRecommendedHIPVectorSize(blocksize, team_size);
#define BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(B)                  \
      if (compute_owned) {                                              \
        const Kokkos::TeamPolicy<execution_space,OverlapTag<0,B> >      \
          policy(rowidx2part.extent(0), team_size, vector_size);        \
        Kokkos::parallel_for                                            \
          ("ComputeResidual::TeamPolicy::run<OverlapTag<0> >", policy, *this); \
      } else {                                                          \
        const Kokkos::TeamPolicy<execution_space,OverlapTag<1,B> >      \
          policy(rowidx2part.extent(0), team_size, vector_size);        \
        Kokkos::parallel_for                                            \
          ("ComputeResidual::TeamPolicy::run<OverlapTag<1> >", policy, *this); \
      } break
      switch (blocksize_requested) {
      case   3: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL( 3);
      case   5: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL( 5);
      case   7: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL( 7);
      case   9: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL( 9);
      case  10: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(10);
      case  11: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(11);
      case  16: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(16);
      case  17: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(17);
      case  18: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(18);
      default : BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL( 0);
      }
#undef BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL
#endif
    } else {
#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
      TEUCHOS_TEST_FOR_EXCEPT_MSG(true, "Error: device compiler should not see this code");
#else
#define BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(B)                  \
      if (compute_owned) {                                              \
        const Kokkos::RangePolicy<execution_space,OverlapTag<0,B> >     \
          policy(0, rowidx2part.extent(0));                             \
        Kokkos::parallel_for                                            \
          ("ComputeResidual::RangePolicy::run<OverlapTag<0> >", policy, *this); \
      } else {                                                          \
        const Kokkos::RangePolicy<execution_space,OverlapTag<1,B> >     \
          policy(0, rowidx2part.extent(0));                             \
        Kokkos::parallel_for                                            \
          ("ComputeResidual::RangePolicy::run<OverlapTag<1> >", policy, *this); \
      } break
      switch (blocksize_requested) {
      case   3: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL( 3);
      case   5: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL( 5);
      case   7: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL( 7);
      case   9: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL( 9);
      case  10: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(10);
      case  11: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(11);
      case  16: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(16);
      case  17: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(17);
      case  18: BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL(18);
      default : BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL( 0);
      }
#undef BLOCKTRIDICONTAINER_DETAILS_COMPUTERESIDUAL
#endif
    }
    IFPACK2_BLOCKTRIDICONTAINER_PROFILER_REGION_END;
  }
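
  // Note on OverlapTag<P,B>: P = 0 computes the residual contribution from
  // locally owned columns and P = 1 the contribution from remote (halo)
  // columns. applyInverseJacobi below launches the P = 0 kernel while the halo
  // exchange for x_remote is still in flight, then launches the P = 1 kernel
  // after the receives complete, overlapping communication with computation.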
  template<typename MatrixType>
  void reduceVector(const ConstUnmanaged<typename ImplType<MatrixType>::impl_scalar_type_1d_view> zz,
                    /* */ typename ImplType<MatrixType>::magnitude_type *vals) {
    IFPACK2_BLOCKTRIDICONTAINER_PROFILER_REGION_BEGIN;
    IFPACK2_BLOCKTRIDICONTAINER_TIMER("BlockTriDi::ReduceVector");

    using impl_type = ImplType<MatrixType>;
    using local_ordinal_type = typename impl_type::local_ordinal_type;
    using impl_scalar_type = typename impl_type::impl_scalar_type;
#if 0
    const auto norm2 = KokkosBlas::nrm1(zz);
#else
    impl_scalar_type norm2(0);
    Kokkos::parallel_reduce
      ("ReduceMultiVector::Device",
       Kokkos::RangePolicy<typename impl_type::execution_space>(0,zz.extent(0)),
       KOKKOS_LAMBDA(const local_ordinal_type &i, impl_scalar_type &update) {
        update += zz(i);
      }, norm2);
#endif
    vals[0] = Kokkos::ArithTraits<impl_scalar_type>::abs(norm2);

    IFPACK2_BLOCKTRIDICONTAINER_PROFILER_REGION_END;
  }
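
  // Usage sketch for reduceVector (illustrative): `zz` holds one rank-local
  // squared-norm contribution per pack, and the result is written into the
  // single-entry buffer returned by NormManager::getBuffer():
  //
  //   reduceVector<MatrixType>(W, norm_manager.getBuffer());
  //   norm_manager.ireduce(sweep);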
  template<typename MatrixType>
  struct NormManager {
  public:
    using impl_type = ImplType<MatrixType>;
    using magnitude_type = typename impl_type::magnitude_type;

  private:
    bool collective_;
    int sweep_step_, sweep_step_upper_bound_;
#ifdef HAVE_IFPACK2_MPI
    MPI_Request mpi_request_;
    MPI_Comm comm_;
#endif
    magnitude_type work_[3];

  public:
    NormManager() = default;
    NormManager(const NormManager &b) = default;
    NormManager(const Teuchos::RCP<const Teuchos::Comm<int> >& comm) {
      sweep_step_ = 1;
      sweep_step_upper_bound_ = 1;
      collective_ = comm->getSize() > 1;
      if (collective_) {
#ifdef HAVE_IFPACK2_MPI
        const auto mpi_comm = Teuchos::rcp_dynamic_cast<const Teuchos::MpiComm<int> >(comm);
        TEUCHOS_ASSERT( ! mpi_comm.is_null());
        comm_ = *mpi_comm->getRawMpiComm();
#endif
      }
      const magnitude_type zero(0), minus_one(-1);
      work_[0] = zero;
      work_[1] = zero;
      work_[2] = minus_one;
    }
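
    // Layout of work_: work_[0] holds the current rank-local (after the
    // reduction, global) squared norm, work_[1] is the MPI send buffer, and
    // work_[2] caches the first sweep's squared norm as the reference value
    // for the relative tolerance test; -1 in work_[2] means no norm has been
    // recorded yet.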
    void setCheckFrequency(const int sweep_step) {
      TEUCHOS_TEST_FOR_EXCEPT_MSG(sweep_step < 1, "sweep step must be >= 1");
      sweep_step_upper_bound_ = sweep_step;
      sweep_step_ = 1;
    }

    // Get the buffer into which to store rank-local squared norms.
    magnitude_type* getBuffer() { return &work_[0]; }

    // Call MPI_Iallreduce to find the global squared norms.
    void ireduce(const int sweep, const bool force = false) {
      if ( ! force && sweep % sweep_step_) return;

      IFPACK2_BLOCKTRIDICONTAINER_TIMER("BlockTriDi::NormManager::Ireduce");

      work_[1] = work_[0];
#ifdef HAVE_IFPACK2_MPI
      auto send_data = &work_[1];
      auto recv_data = &work_[0];
      if (collective_) {
# if defined(IFPACK2_BLOCKTRIDICONTAINER_USE_MPI_3)
        MPI_Iallreduce(send_data, recv_data, 1,
                       Teuchos::Details::MpiTypeTraits<magnitude_type>::getType(),
                       MPI_SUM, comm_, &mpi_request_);
# else
        MPI_Allreduce (send_data, recv_data, 1,
                       Teuchos::Details::MpiTypeTraits<magnitude_type>::getType(),
                       MPI_SUM, comm_);
# endif
      }
#endif
    }

    // Check if the norm-based termination criterion is met. tol2 is the
    // tolerance squared. Sweep is the sweep index. If not every iteration is
    // being checked, this function immediately returns false. If a check must
    // be done at this iteration, it waits for the reduction triggered by
    // ireduce to complete, then checks the global norm against the tolerance.
    bool checkDone(const int sweep, const magnitude_type tol2, const bool force = false) {
      bool r_val = false;
      if (sweep <= 0) return false;

      IFPACK2_BLOCKTRIDICONTAINER_TIMER("BlockTriDi::NormManager::CheckDone");

      TEUCHOS_ASSERT(sweep >= 1);
      if ( ! force && (sweep - 1) % sweep_step_) return false;
#ifdef HAVE_IFPACK2_MPI
# if defined(IFPACK2_BLOCKTRIDICONTAINER_USE_MPI_3)
      if (collective_) MPI_Wait(&mpi_request_, MPI_STATUS_IGNORE);
# endif
#endif
      if (sweep == 1) {
        work_[2] = work_[0];
      } else {
        r_val = (work_[0] < tol2*work_[2]);
      }

      // adjust sweep step
      const auto adjusted_sweep_step = 2*sweep_step_;
      if (adjusted_sweep_step < sweep_step_upper_bound_) {
        sweep_step_ = adjusted_sweep_step;
      } else {
        sweep_step_ = sweep_step_upper_bound_;
      }
      return r_val;
    }

    // After termination has occurred, finalize the norms for use in
    // get_norms{0,final}.
    void finalize() {
      work_[0] = std::sqrt(work_[0]); // after converged
      if (work_[2] >= 0)
        work_[2] = std::sqrt(work_[2]); // first norm
      // if work_[2] is minus one, then the norm was not requested.
    }

    // Report norms to the caller.
    magnitude_type getNorms0 () const { return work_[2]; }
    magnitude_type getNormsFinal () const { return work_[0]; }
  };
"); 4000 using impl_type = ImplType<MatrixType>; 4001 using node_memory_space = typename impl_type::node_memory_space; 4002 using local_ordinal_type = typename impl_type::local_ordinal_type; 4003 using size_type = typename impl_type::size_type; 4004 using impl_scalar_type = typename impl_type::impl_scalar_type; 4005 using magnitude_type = typename impl_type::magnitude_type; 4006 using local_ordinal_type_1d_view = typename impl_type::local_ordinal_type_1d_view; 4007 using vector_type_1d_view = typename impl_type::vector_type_1d_view; 4008 using vector_type_3d_view = typename impl_type::vector_type_3d_view; 4009 using tpetra_multivector_type = typename impl_type::tpetra_multivector_type; 4011 using impl_scalar_type_1d_view = typename impl_type::impl_scalar_type_1d_view; 4013 // either tpetra importer or async importer must be active 4014 TEUCHOS_TEST_FOR_EXCEPT_MSG(!tpetra_importer.is_null() && !async_importer.is_null(), 4015 "Neither Tpetra importer nor Async importer is null.
"); 4016 // max number of sweeps should be positive number 4017 TEUCHOS_TEST_FOR_EXCEPT_MSG(max_num_sweeps <= 0, 4018 "Maximum number of sweeps must be >= 1.
"); 4021 const bool is_seq_method_requested = !tpetra_importer.is_null(); 4022 const bool is_async_importer_active = !async_importer.is_null(); 4023 const bool is_norm_manager_active = tol > Kokkos::ArithTraits<magnitude_type>::zero(); 4024 const magnitude_type tolerance = tol*tol; 4025 const local_ordinal_type blocksize = btdm.values.extent(1); 4026 const local_ordinal_type num_vectors = Y.getNumVectors(); 4027 const local_ordinal_type num_blockrows = interf.part2packrowidx0_back; 4029 const impl_scalar_type zero(0.0); 4031 TEUCHOS_TEST_FOR_EXCEPT_MSG(is_norm_manager_active && is_seq_method_requested, 4033 "which in any
case is
for developer use only,
" << 4034 "does not support norm-based termination.
"); 4035 const bool device_accessible_from_host = Kokkos::SpaceAccessibility< 4036 Kokkos::DefaultHostExecutionSpace, node_memory_space>::accessible; 4037 TEUCHOS_TEST_FOR_EXCEPTION(is_seq_method_requested && !device_accessible_from_host, 4038 std::invalid_argument, 4040 "which in any
case is
for developer use only,
" << 4041 "only supports memory spaces accessible from host.
"); 4043 // if workspace is needed more, resize it 4044 const size_type work_span_required = num_blockrows*num_vectors*blocksize; 4045 if (work.span() < work_span_required) 4046 work = vector_type_1d_view("vector workspace 1d view
", work_span_required); 4049 const local_ordinal_type W_size = interf.packptr.extent(0)-1; 4050 if (local_ordinal_type(W.extent(0)) < W_size) 4051 W = impl_scalar_type_1d_view("W
", W_size); 4053 typename impl_type::impl_scalar_type_2d_view_tpetra remote_multivector; 4055 if (is_seq_method_requested) { 4056 if (Z.getNumVectors() != Y.getNumVectors()) 4057 Z = tpetra_multivector_type(tpetra_importer->getTargetMap(), num_vectors, false); 4059 if (is_async_importer_active) { 4060 // create comm data buffer and keep it here 4061 async_importer->createDataBuffer(num_vectors); 4062 remote_multivector = async_importer->getRemoteMultiVectorLocalView(); 4067 // wrap the workspace with 3d view 4068 vector_type_3d_view pmv(work.data(), num_blockrows, blocksize, num_vectors); 4069 const auto XX = X.getLocalViewDevice(Tpetra::Access::ReadOnly); 4070 const auto YY = Y.getLocalViewDevice(Tpetra::Access::ReadWrite); 4071 const auto ZZ = Z.getLocalViewDevice(Tpetra::Access::ReadWrite); 4072 if (is_y_zero) Kokkos::deep_copy(YY, zero); 4074 MultiVectorConverter<MatrixType> multivector_converter(interf, pmv); 4075 SolveTridiags<MatrixType> solve_tridiags(interf, btdm, pmv, 4076 damping_factor, is_norm_manager_active); 4078 const local_ordinal_type_1d_view dummy_local_ordinal_type_1d_view; 4079 ComputeResidualVector<MatrixType> 4080 compute_residual_vector(amd, A->getCrsGraph().getLocalGraphDevice(), blocksize, interf, 4081 is_async_importer_active ? async_importer->dm2cm : dummy_local_ordinal_type_1d_view); 4083 // norm manager workspace resize 4084 if (is_norm_manager_active) 4085 norm_manager.setCheckFrequency(check_tol_every); 4089 for (;sweep<max_num_sweeps;++sweep) { 4093 multivector_converter.run(XX); 4095 if (is_seq_method_requested) { 4096 // SEQ METHOD IS TESTING ONLY 4099 Z.doImport(Y, *tpetra_importer, Tpetra::REPLACE); 4100 compute_residual_vector.run(YY, XX, ZZ); 4102 // pmv := y(lclrow). 4103 multivector_converter.run(YY); 4105 // fused y := x - R y and pmv := y(lclrow); 4106 // real use case does not use overlap comp and comm 4107 if (overlap_communication_and_computation || !is_async_importer_active) { 4108 if (is_async_importer_active) async_importer->asyncSendRecv(YY); 4109 compute_residual_vector.run(pmv, XX, YY, remote_multivector, true); 4110 if (is_norm_manager_active && norm_manager.checkDone(sweep, tolerance)) { 4111 if (is_async_importer_active) async_importer->cancel(); 4114 if (is_async_importer_active) { 4115 async_importer->syncRecv(); 4116 compute_residual_vector.run(pmv, XX, YY, remote_multivector, false); 4119 if (is_async_importer_active) 4120 async_importer->syncExchange(YY); 4121 if (is_norm_manager_active && norm_manager.checkDone(sweep, tolerance)) break; 4122 compute_residual_vector.run(pmv, XX, YY, remote_multivector); 4128 // pmv := inv(D) pmv. 4130 solve_tridiags.run(YY, W); 4133 if (is_norm_manager_active) { 4134 // y(lclrow) = (b - a) y(lclrow) + a pmv, with b = 1 always. 4135 reduceVector<MatrixType>(W, norm_manager.getBuffer()); 4136 if (sweep + 1 == max_num_sweeps) { 4137 norm_manager.ireduce(sweep, true); 4138 norm_manager.checkDone(sweep + 1, tolerance, true); 4140 norm_manager.ireduce(sweep); 4147 //sqrt the norms for the caller's use. 
    if (is_norm_manager_active) norm_manager.finalize();

    return sweep;
  }

  template<typename MatrixType>
  struct ImplObject {
    using impl_type = ImplType<MatrixType>;
    using part_interface_type = PartInterface<MatrixType>;
    using block_tridiags_type = BlockTridiags<MatrixType>;
    using amd_type = AmD<MatrixType>;
    using norm_manager_type = NormManager<MatrixType>;
    using async_import_type = AsyncableImport<MatrixType>;

    // distributed objects
    Teuchos::RCP<const typename impl_type::tpetra_block_crs_matrix_type> A;
    Teuchos::RCP<const typename impl_type::tpetra_import_type> tpetra_importer;
    Teuchos::RCP<async_import_type> async_importer;
    bool overlap_communication_and_computation;

    // copy of Y (mutable to allow modification through a const interface)
    mutable typename impl_type::tpetra_multivector_type Z;
    mutable typename impl_type::impl_scalar_type_1d_view W;

    // local objects
    part_interface_type part_interface;
    block_tridiags_type block_tridiags; // D
    amd_type a_minus_d; // R = A - D
    mutable typename impl_type::vector_type_1d_view work; // right hand side workspace
    mutable norm_manager_type norm_manager;
  };

} // namespace BlockTriDiContainerDetails

} // namespace Ifpack2

#endif