#ifndef KOKKOS_CRSMATRIX_UQ_PCE_CUDA_HPP
#define KOKKOS_CRSMATRIX_UQ_PCE_CUDA_HPP

#if defined( __CUDACC__)

#include "KokkosSparse_CrsMatrix.hpp"
#include "Kokkos_Core.hpp"
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 300)
#  define HAVE_CUDA_SHUFFLE 1
#else
#  define HAVE_CUDA_SHUFFLE 0
#endif
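
// When HAVE_CUDA_SHUFFLE is enabled (compute capability 3.0 and newer) the
// multiply kernel reduces per-thread partial sums with warp-shuffle
// intrinsics; otherwise it falls back to a shared-memory tree reduction.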

template <typename MatrixStorage,
          typename MatrixOrdinal,
          typename MatrixMemory,
          typename MatrixSize,
          typename InputStorage,
          typename ... InputP,
          typename OutputStorage,
          typename ... OutputP>
class Multiply< KokkosSparse::CrsMatrix< const Sacado::UQ::PCE<MatrixStorage>,
                                         MatrixOrdinal,
                                         Kokkos::Cuda,
                                         MatrixMemory,
                                         MatrixSize >,
                Kokkos::View< const Sacado::UQ::PCE<InputStorage>*,
                              InputP... >,
                Kokkos::View< Sacado::UQ::PCE<OutputStorage>*,
                              OutputP... >
              >
{
public:
  typedef Sacado::UQ::PCE<MatrixStorage>  MatrixValue;
  typedef Sacado::UQ::PCE<InputStorage>   InputVectorValue;
  typedef Sacado::UQ::PCE<OutputStorage>  OutputVectorValue;

  typedef Kokkos::Cuda MatrixDevice;
  typedef MatrixDevice execution_space;
  typedef execution_space::size_type size_type;

  typedef KokkosSparse::CrsMatrix< const MatrixValue, MatrixOrdinal,
                                   MatrixDevice, MatrixMemory,
                                   MatrixSize > matrix_type;
  typedef typename matrix_type::values_type matrix_values_type;
  typedef typename Kokkos::CijkType<matrix_values_type>::type tensor_type;
  typedef Kokkos::View< const InputVectorValue*, InputP... > input_vector_type;
  typedef Kokkos::View< OutputVectorValue*, OutputP... > output_vector_type;

  typedef typename matrix_type::StaticCrsGraphType matrix_graph_type;
  typedef typename matrix_values_type::array_type matrix_array_type;
  typedef typename input_vector_type::array_type input_array_type;
  typedef typename output_vector_type::array_type output_array_type;

  typedef typename MatrixValue::value_type matrix_scalar;
  typedef typename InputVectorValue::value_type input_scalar;
  typedef typename OutputVectorValue::value_type output_scalar;
  typedef typename tensor_type::value_type tensor_scalar;

  const matrix_array_type   m_A_values ;
  const matrix_graph_type   m_A_graph ;
  const input_array_type    m_x ;
  const output_array_type   m_y ;
  const tensor_type         m_tensor ;
  const input_scalar        m_a ;
  const output_scalar       m_b ;
  const size_type           BlockSize ;

  Multiply( const matrix_type &        A ,
            const input_vector_type &  x ,
            const output_vector_type & y ,
            const input_scalar &       a ,
            const output_scalar &      b ,
            const size_type            block_size )
    : m_A_values( A.values )
    , m_A_graph( A.graph )
    , m_x( x )
    , m_y( y )
    , m_tensor( Kokkos::cijk(A.values) )
    , m_a( a )
    , m_b( b )
    , BlockSize( block_size )
    {}
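
  // Kernel strategy (one CUDA block per FEM row):
  //  * stage BlockSize columns of x and A for the row in shared memory,
  //    interleaved by column for coalesced access;
  //  * threadIdx.y strides over stochastic rows i of the Cijk tensor while
  //    threadIdx.x strides over the sparse tensor entries of row i;
  //  * partial sums are reduced across threadIdx.x (shuffle or shared memory)
  //    into sh_y, which is finally scaled by a and b and written to m_y.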
  __device__
  void operator()(void) const
  {
    // Number of bases in the stochastic system
    const size_type dim = m_tensor.dimension();

    // Shared memory for staging blocks of x and A plus the output row sh_y
    volatile input_scalar * const sh_x =
      kokkos_impl_cuda_shared_memory<input_scalar>();
    volatile matrix_scalar * const sh_A = sh_x + BlockSize*dim;
    volatile output_scalar * const sh_y = sh_A + BlockSize*dim;
#if !HAVE_CUDA_SHUFFLE
    volatile output_scalar * const sh_t = sh_y + dim;
#endif

    const size_type nid = blockDim.x * blockDim.y;
    const size_type tid = threadIdx.x + blockDim.x * threadIdx.y;

    // Zero the shared output accumulator
    for ( size_type i = tid; i < dim; i += nid )
      sh_y[i] = 0.0;

    // Loop over columns in the FEM sparsity pattern for this row;
    // blockIdx.x is the row of the deterministic (FEM) system
    const size_type iBlockEntryBeg = m_A_graph.row_map[ blockIdx.x ];
    const size_type iBlockEntryEnd = m_A_graph.row_map[ blockIdx.x + 1 ];
    for (size_type iBlockEntry=iBlockEntryBeg; iBlockEntry<iBlockEntryEnd;
         iBlockEntry += BlockSize) {
      const size_type block_size =
        (iBlockEntryEnd-iBlockEntry < BlockSize) ?
        iBlockEntryEnd-iBlockEntry : BlockSize;

      // Wait for x and A from the previous iteration to be used before
      // overwriting them
      __syncthreads();

      // Coalesced read of a block of x and A into shared memory
      for ( size_type col = 0; col < block_size; ++col ) {

        const size_type iBlockColumn = m_A_graph.entries( iBlockEntry + col );
        const input_scalar * const x  = & m_x( 0, iBlockColumn );
        const matrix_scalar * const A = & m_A_values( iBlockEntry + col, 0 );

        // Whole block reads contiguously from global memory
        for ( size_type i = tid; i < dim; i += nid ) {
          sh_x[col + i * BlockSize] = x[i];
          sh_A[col + i * BlockSize] = A[i];
        }
      }

      __syncthreads(); // wait for x and A to be read before using them

      // This CUDA block computes every stochastic component of y for the row
      for ( size_type i = threadIdx.y; i < dim; i += blockDim.y ) {
        output_scalar y = 0;

        // Product-tensor entries that this thread iterates
        const size_type lBeg = m_tensor.entry_begin( i );
        const size_type lEnd = m_tensor.entry_end( i );

        // Loop through sparse tensor contributions with coalesced reads
        for ( size_type l = lBeg+threadIdx.x; l < lEnd; l += blockDim.x ) {

          // The two tensor coordinates (j,k) are packed into one 32-bit word
          const size_type kj    = m_tensor.coord( l );
          const tensor_scalar v = m_tensor.value( l );
          const size_type j     = ( kj & 0x0ffff ) * BlockSize ;
          const size_type k     = ( kj >> 16     ) * BlockSize ;

          for ( size_type col = 0; col < block_size; ++col ) {
            y += v * ( sh_A[col+j] * sh_x[col+k] +
                       sh_A[col+k] * sh_x[col+j] );
          }
        }
#if HAVE_CUDA_SHUFFLE
        // Reduce y across threadIdx.x with warp shuffles (power-of-two width)
        for ( int w = blockDim.x/2; w > 0; w /= 2 )
          y += shfl_down(y, w, blockDim.x);
        if ( threadIdx.x == 0 ) sh_y[i] += y;
#else
        sh_t[ tid ] = y;
        if (threadIdx.x+16 < blockDim.x) sh_t[tid] += sh_t[tid+16];
        if (threadIdx.x+ 8 < blockDim.x) sh_t[tid] += sh_t[tid+ 8];
        if (threadIdx.x+ 4 < blockDim.x) sh_t[tid] += sh_t[tid+ 4];
        if (threadIdx.x+ 2 < blockDim.x) sh_t[tid] += sh_t[tid+ 2];
        if (threadIdx.x+ 1 < blockDim.x) sh_t[tid] += sh_t[tid+ 1];
        if (threadIdx.x == 0) sh_y[i] += sh_t[tid];
#endif
      }
    }
    // Wait for all contributions to sh_y, then write y = a*A*x + b*y
    __syncthreads();

    if ( m_b == output_scalar(0) )
      for ( size_type i = tid; i < dim; i += nid )
        m_y( i, blockIdx.x ) = m_a * sh_y[ i ];
    else
      for ( size_type i = tid; i < dim; i += nid )
        m_y( i, blockIdx.x ) = m_a * sh_y[ i ] + m_b * m_y( i, blockIdx.x );
  }
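
  // The dynamic shared-memory allocation passed at launch must hold
  // BlockSize*dim x-values, BlockSize*dim A-values, dim y-values and, when
  // shuffle is unavailable, one scratch value per thread; apply() below
  // sizes the launch accordingly.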

  struct TensorReadEntry {
    size_type block_size, shmem, num_blocks, num_warp;
    double reads;
  };

  static void apply( const matrix_type & A ,
                     const input_vector_type & x ,
                     const output_vector_type & y ,
                     const input_scalar & a = input_scalar(1) ,
                     const output_scalar & b = output_scalar(0) )
  {
    const tensor_type tensor = Kokkos::cijk(A.values);
    const size_type row_count = A.graph.row_map.extent(0) - 1;
    const size_type tensor_dimension = tensor.dimension();
    const size_type tensor_align = tensor_dimension;
    const size_type avg_tensor_entries_per_row = tensor.avg_entries_per_row();

    // Should be computed from the FEM graph; 27 is typical of a structured
    // trilinear-hexahedral discretization
    const size_type fem_nnz_per_row = 27;

    // Device properties of the currently selected CUDA device
    DeviceProp device_prop;
    const size_type shcap = device_prop.shared_memory_capacity;
    const size_type sh_granularity = device_prop.shared_memory_granularity;
    const size_type max_shmem_per_block = device_prop.max_shmem_per_block;
    const size_type max_blocks_per_sm = device_prop.max_blocks_per_sm;
    const size_type warp_size = device_prop.warp_size;
    const size_type warp_granularity = device_prop.warp_granularity;
    const size_type max_warps_per_block =
      std::min(device_prop.max_threads_per_block / warp_size,
               device_prop.max_warps_per_sm);
    const size_type min_warps_per_block = 1;
    const size_type max_regs_per_sm = device_prop.max_regs_per_sm;
    const size_type max_regs_per_block = device_prop.max_regs_per_block;
    const size_type reg_bank_size = device_prop.reg_bank_size;

    // Number of warps per SM / per block allowed by register usage
    const size_type regs_per_thread =
      device_prop.get_kernel_registers(
        Kokkos::Impl::cuda_parallel_launch_local_memory<Multiply>);
    const size_type regs_per_warp =
      (warp_size*regs_per_thread + reg_bank_size-1) & ~(reg_bank_size-1);
    const size_type warps_per_sm =
      (max_regs_per_sm/regs_per_warp) & ~(warp_granularity-1);
    const size_type warps_per_block =
      (max_regs_per_block/regs_per_warp) & ~(warp_granularity-1);

    // Number of threads per tensor row, based on the average number of
    // tensor entries per row
    const size_type threads_per_row =
      avg_tensor_entries_per_row >= 88 ? 32 : 16;
    const size_type rows_per_warp = warp_size / threads_per_row;

    const size_type in_vec_scalar_size = sizeof(input_scalar);
    const size_type out_vec_scalar_size = sizeof(output_scalar);
    const size_type mat_scalar_size = sizeof(matrix_scalar);
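
    // Launch configuration: either use a fixed block count (USE_FIXED_BLOCKSIZE)
    // or search over odd column block sizes, keeping each candidate that fits
    // in shared memory and choosing the one that minimizes the estimated
    // number of tensor reads per thread (TensorReadEntry::reads).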

#define USE_FIXED_BLOCKSIZE 0

#if USE_FIXED_BLOCKSIZE

    const size_type num_blocks = 3;
    size_type nw = warps_per_sm / num_blocks;
    while (nw > 1 && num_blocks*nw % warp_granularity) --nw;
    const size_type num_warp = nw;
    const size_type sh_per_block = shcap / num_blocks;
    const size_type sr =
      device_prop.has_shuffle ? 0 : in_vec_scalar_size*warp_size*num_warp;
    size_type bs = ((sh_per_block - sr) / tensor_align - out_vec_scalar_size) /
      (in_vec_scalar_size+mat_scalar_size);
    if (bs % 2 == 0) --bs;
    const size_type block_size_max = 31;
    const size_type block_size = std::min(bs, block_size_max);
    const size_type shmem =
      ( ((in_vec_scalar_size+mat_scalar_size)*block_size+out_vec_scalar_size)*tensor_align + sr + sh_granularity-1 ) & ~(sh_granularity-1);

#else

    // Want an odd block size for better bank-conflict behavior, and a block
    // size no larger than roughly half the FEM nonzeros per row
    const size_type block_size_min = 3;
    const size_type half_nnz_per_row = fem_nnz_per_row / 2 + 1;
    const size_type block_size_max =
      half_nnz_per_row % 2 ? half_nnz_per_row + 1 : half_nnz_per_row;
    Teuchos::Array<TensorReadEntry> reads_per_thread;
    for (size_type bs = block_size_min; bs<=block_size_max; bs+=2) {
      // Bound shared memory by the maximum possible number of warps per block
      const size_type sr =
        device_prop.has_shuffle ? 0 : in_vec_scalar_size*warp_size*warps_per_block;
      size_type shmem =
        ((in_vec_scalar_size+mat_scalar_size)*bs+out_vec_scalar_size)*tensor_align+sr;
      shmem = (shmem + sh_granularity-1) & ~(sh_granularity-1);
      if (shmem <= max_shmem_per_block) {
        size_type num_blocks = std::min(shcap / shmem, max_blocks_per_sm);
        size_type tensor_reads = (fem_nnz_per_row+bs-1) / bs;
        size_type num_warp =
          std::min(std::max(std::min(warps_per_sm/num_blocks, warps_per_block),
                            min_warps_per_block),
                   max_warps_per_block);
        while (num_warp > 1 && num_blocks*num_warp % warp_granularity)
          --num_warp;
        TensorReadEntry entry;
        entry.block_size = bs;
        entry.shmem = shmem;
        entry.num_blocks = num_blocks;
        entry.num_warp = num_warp;

        // Prefer at least 3 resident blocks per SM
        size_type factor = std::min(num_blocks,3u);
        entry.reads = (static_cast<double>(tensor_reads) /
                       static_cast<double>(factor*num_blocks*num_warp));
        reads_per_thread.push_back(entry);
      }
    }
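
    // Worked example of the sizing above (illustrative numbers only): with
    // 8-byte scalars, tensor_align = 120 and bs = 9, and with shuffle
    // available (sr = 0), shmem = ((8+8)*9 + 8)*120 = 18240 bytes before
    // rounding, so at most two such blocks fit in a 48 KB shared-memory
    // partition.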
    TEUCHOS_TEST_FOR_EXCEPTION(
      reads_per_thread.size() == 0, std::logic_error,
      "Stochastic problem dimension is too large to fit in shared memory");

    // Pick the candidate with the fewest estimated tensor reads per thread
    size_type idx = 0;
    double min_reads = reads_per_thread[0].reads;
    for (int i=1; i<reads_per_thread.size(); ++i) {
      if (reads_per_thread[i].reads < min_reads) {
        idx = i;
        min_reads = reads_per_thread[i].reads;
      }
    }

    const size_type block_size = reads_per_thread[idx].block_size;
    const size_type shmem = reads_per_thread[idx].shmem;
    const size_type num_blocks = reads_per_thread[idx].num_blocks;
    const size_type num_warp = reads_per_thread[idx].num_warp;

#endif

    const dim3 dBlock( threads_per_row , rows_per_warp*num_warp , 1 );
    const dim3 dGrid( row_count, 1, 1 );
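
    // One block per FEM row; within a block, threadIdx.x spans threads_per_row
    // tensor entries and threadIdx.y spans rows_per_warp*num_warp stochastic
    // rows, matching the kernel's indexing above.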

#if 0
    std::cout << "block_size = " << block_size
              << " tensor reads = " << (fem_nnz_per_row+block_size-1)/block_size
              << " regs_per_thread = " << regs_per_thread
              << " num blocks = " << num_blocks
              << " num warps = " << num_warp
              << " num rows = " << tensor_dimension
              << " rows/warp = " << tensor_dimension / (num_warp*rows_per_warp)
              << " avg entries/row = " << avg_tensor_entries_per_row
              << std::endl;
#endif
    Kokkos::Impl::cuda_parallel_launch_local_memory<<< dGrid, dBlock, shmem >>>
      ( Multiply( A, x, y, a, b, block_size ) );
  }
};

template <typename MatrixStorage,
          typename MatrixOrdinal,
          typename MatrixMemory,
          typename MatrixSize,
          typename InputStorage,
          typename ... InputP,
          typename OutputStorage,
          typename ... OutputP>
class Multiply< KokkosSparse::CrsMatrix< const Sacado::UQ::PCE<MatrixStorage>,
                                         MatrixOrdinal,
                                         Kokkos::Cuda,
                                         MatrixMemory,
                                         MatrixSize >,
                Kokkos::View< const Sacado::UQ::PCE<InputStorage>**,
                              InputP... >,
                Kokkos::View< Sacado::UQ::PCE<OutputStorage>**,
                              OutputP... >
              >
{
public:
  typedef Sacado::UQ::PCE<MatrixStorage>  MatrixValue;
  typedef Sacado::UQ::PCE<InputStorage>   InputVectorValue;
  typedef Sacado::UQ::PCE<OutputStorage>  OutputVectorValue;

  typedef Kokkos::Cuda MatrixDevice;
  typedef MatrixDevice execution_space;
  typedef execution_space::size_type size_type;

  typedef KokkosSparse::CrsMatrix< const MatrixValue, MatrixOrdinal,
                                   MatrixDevice, MatrixMemory,
                                   MatrixSize > matrix_type;
  typedef Kokkos::View< const InputVectorValue**, InputP... > input_vector_type;
  typedef Kokkos::View< OutputVectorValue**, OutputP... > output_vector_type;

  typedef typename InputVectorValue::value_type input_scalar;
  typedef typename OutputVectorValue::value_type output_scalar;

  static void apply( const matrix_type & A ,
                     const input_vector_type & x ,
                     const output_vector_type & y ,
                     const input_scalar & a = input_scalar(1) ,
                     const output_scalar & b = output_scalar(0) )
  {
    typedef Kokkos::View< const InputVectorValue*, InputP... > input_vector_type_1D;
    typedef Kokkos::View< OutputVectorValue*, OutputP... > output_vector_type_1D;
    typedef Multiply< matrix_type, input_vector_type_1D,
                      output_vector_type_1D > multiply_type_1D;

    // Apply the rank-1 kernel to each column of the multi-vector
    const size_type num_col = y.extent(1);
    for (size_type col=0; col<num_col; ++col)
      multiply_type_1D::apply(
        A,
        Kokkos::subview( x, Kokkos::ALL(), col),
        Kokkos::subview( y, Kokkos::ALL(), col),
        a, b );
  }
};
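
// The rank-2 (multi-vector) specialization above reuses the rank-1 kernel once
// per column via subviews, so its cost is that of num_col independent
// matrix-vector products; columns are not blocked together.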

template <typename Kernel>
__global__ void
#if __CUDA_ARCH__ >= 300
__launch_bounds__(1024,2)
#endif
MeanFullOccupancyKernelLaunch(Kernel kernel) {
  kernel();
}
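
// __launch_bounds__(1024,2) asks the compiler to cap register usage so that
// two blocks of up to 1024 threads can be resident per SM, trading register
// spills for occupancy of the mean-multiply kernel.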

template <typename MatrixStorage,
          typename MatrixOrdinal,
          typename MatrixMemory,
          typename MatrixSize,
          typename InputStorage,
          typename ... InputP,
          typename OutputStorage,
          typename ... OutputP>
class MeanMultiply< KokkosSparse::CrsMatrix< const Sacado::UQ::PCE<MatrixStorage>,
                                             MatrixOrdinal,
                                             Kokkos::Cuda,
                                             MatrixMemory,
                                             MatrixSize >,
                    Kokkos::View< const Sacado::UQ::PCE<InputStorage>*,
                                  InputP... >,
                    Kokkos::View< Sacado::UQ::PCE<OutputStorage>*,
                                  OutputP... >
                  >
{
public:
  typedef Sacado::UQ::PCE<MatrixStorage>  MatrixValue;
  typedef Sacado::UQ::PCE<InputStorage>   InputVectorValue;
  typedef Sacado::UQ::PCE<OutputStorage>  OutputVectorValue;

  typedef Kokkos::Cuda MatrixDevice;
  typedef MatrixDevice::size_type size_type;

  typedef KokkosSparse::CrsMatrix< const MatrixValue, MatrixOrdinal,
                                   MatrixDevice, MatrixMemory,
                                   MatrixSize > matrix_type;
  typedef typename matrix_type::values_type matrix_values_type;
  typedef Kokkos::View< const InputVectorValue*, InputP... > input_vector_type;
  typedef Kokkos::View< OutputVectorValue*, OutputP... > output_vector_type;

  typedef typename matrix_type::StaticCrsGraphType matrix_graph_type;
  typedef typename MatrixValue::value_type matrix_scalar;
  typedef typename InputVectorValue::value_type input_scalar;
  typedef typename OutputVectorValue::value_type output_scalar;

  template <int BlockSize>
  struct Kernel {
    typedef typename Kokkos::FlatArrayType<matrix_values_type>::type matrix_array_type;
    typedef typename input_vector_type::array_type input_array_type;
    typedef typename output_vector_type::array_type output_array_type;

    const matrix_array_type   m_A_values ;
    const matrix_graph_type   m_A_graph ;
    const output_array_type   v_y ;
    const input_array_type    v_x ;
    const input_scalar        m_a ;
    const output_scalar       m_b ;
    const size_type           m_row_count;
    const size_type           dim ;

    Kernel( const matrix_type &        A ,
            const input_vector_type &  x ,
            const output_vector_type & y ,
            const input_scalar &       a ,
            const output_scalar &      b )
      : m_A_values( A.values )
      , m_A_graph( A.graph )
      , v_y( y )
      , v_x( x )
      , m_a( a )
      , m_b( b )
      , m_row_count( A.graph.row_map.extent(0)-1 )
      , dim( dimension_scalar(x) )
      {}
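
    // Kernel strategy: each threadIdx.y handles one FEM row; BlockSize column
    // indices and mean matrix values are staged in shared memory, and
    // threadIdx.x then strides over the PCE coefficients of x and y.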
    __device__
    void operator()(void) const
    {
      volatile matrix_scalar * const sh_A =
        kokkos_impl_cuda_shared_memory<matrix_scalar>();
      volatile size_type * const sh_col =
        reinterpret_cast<volatile size_type*>(sh_A + BlockSize*blockDim.y);

      const size_type iBlockRow = blockDim.y*blockIdx.x + threadIdx.y;
      if (iBlockRow < m_row_count) {
        const size_type iEntryBegin = m_A_graph.row_map[ iBlockRow ];
        const size_type iEntryEnd   = m_A_graph.row_map[ iBlockRow + 1 ];

        // Initialize y for this row
        if (m_b == output_scalar(0))
          for ( size_type pce = threadIdx.x; pce < dim ; pce+=blockDim.x )
            v_y(pce, iBlockRow) = 0.0;
        else
          for ( size_type pce = threadIdx.x; pce < dim ; pce+=blockDim.x )
            v_y(pce, iBlockRow) = m_b*v_y(pce, iBlockRow);

        // Loop over the columns of this row, BlockSize entries at a time
        for (size_type col_block=iEntryBegin; col_block<iEntryEnd;
             col_block+=BlockSize) {
          const size_type num_col = col_block+BlockSize <= iEntryEnd ?
            BlockSize : iEntryEnd-col_block;

          // Stage column indices and (mean) matrix values in shared memory
          for (size_type col=threadIdx.x; col<num_col; col+=blockDim.x) {
            sh_col[col*blockDim.y+threadIdx.y] =
              m_A_graph.entries(col_block+col);
            sh_A[col*blockDim.y+threadIdx.y] =
              m_A_values(col_block+col);
          }
          if (blockDim.x > Kokkos::Impl::CudaTraits::WarpSize)
            __syncthreads();

          // Accumulate the mean-only contribution for each PCE coefficient
          for ( size_type pce = threadIdx.x; pce < dim ; pce+=blockDim.x ) {
            output_scalar s = 0.0;
            for ( size_type col = 0; col < num_col; ++col ) {
              const size_type iCol = sh_col[col*blockDim.y+threadIdx.y];
              const matrix_scalar aA = m_a*sh_A[col*blockDim.y+threadIdx.y];
              s += aA*v_x(pce, iCol);
            }
            v_y(pce, iBlockRow) += s;
          }
        }
      }
    }
  };

  static void apply( const matrix_type & A ,
                     const input_vector_type & x ,
                     const output_vector_type & y ,
                     const input_scalar & a = input_scalar(1) ,
                     const output_scalar & b = output_scalar(0) )
  {
    const size_t row_count = A.graph.row_map.extent(0) - 1;
    const size_type dim = dimension_scalar(x);

    // Choose how many threads iterate over PCE coefficients (threads_per_row)
    // and how many matrix rows each block handles (rows_per_block)
    size_type threads_per_row;
    size_type rows_per_block;
    if (dim >= 32) {
      threads_per_row = 32;
      rows_per_block = 8;
    }
    else {
      threads_per_row = 16;
      rows_per_block = 16;
    }
    const size_type num_blocks =
      (row_count + rows_per_block -1 ) / rows_per_block;
    const dim3 dBlock( threads_per_row , rows_per_block , 1 );
    const dim3 dGrid( num_blocks, 1, 1 );

    // Number of column entries processed at a time; larger values need more
    // shared memory and reduce occupancy
    const int BlockSize = 32;

    // Match the shared-memory bank size to the matrix scalar size
    if (sizeof(matrix_scalar) > 4)
      cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte);
    else
      cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeFourByte);

    const size_t shared =
      (BlockSize*dBlock.y) * (sizeof(size_type) + sizeof(matrix_scalar));

    MeanFullOccupancyKernelLaunch<<< dGrid, dBlock, shared >>>
      ( Kernel<BlockSize>( A, x, y, a, b ) );
  }
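
// Illustrative usage sketch (not part of this header; the scalar, device and
// view types below are example choices, and applications normally reach these
// specializations through the higher-level Stokhos/Kokkos interfaces rather
// than by naming them directly):
//
//   typedef Stokhos::DynamicStorage<int,double,Kokkos::Cuda> storage_type;
//   typedef Sacado::UQ::PCE<storage_type> pce_type;
//   typedef KokkosSparse::CrsMatrix<const pce_type,int,Kokkos::Cuda> matrix_type;
//   typedef Kokkos::View<const pce_type*,Kokkos::Cuda> input_vector;
//   typedef Kokkos::View<pce_type*,Kokkos::Cuda> output_vector;
//
//   // y = a*A*x + b*y, dispatching to the rank-1 specialization above:
//   Multiply<matrix_type,input_vector,output_vector>::apply(A, x, y, a, b);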