#ifndef STOKHOS_CUDA_CRS_PRODUCT_TENSOR_HPP
#define STOKHOS_CUDA_CRS_PRODUCT_TENSOR_HPP

#include <iostream>
#include <algorithm>

#include "Kokkos_Core.hpp"

#include "Stokhos_Multiply.hpp"
#include "Stokhos_BlockCrsMatrix.hpp"
#include "Stokhos_CrsProductTensor.hpp"
#include "Stokhos_Cuda_DeviceProp.hpp"

#include "Teuchos_TestForException.hpp"
#include "Teuchos_Array.hpp"

#include "cuda_profiler_api.h"

namespace Stokhos {

//----------------------------------------------------------------------------
// Matrix-vector product specialization for a block-CRS matrix whose blocks
// are sparse stochastic Galerkin product tensors (CrsProductTensor), run on
// Kokkos::Cuda.
//----------------------------------------------------------------------------
template< typename TensorScalar,
          typename MatrixScalar,
          typename VectorScalar >
class Multiply<
  BlockCrsMatrix< CrsProductTensor< TensorScalar, Kokkos::Cuda >,
                  MatrixScalar, Kokkos::Cuda >,
  Kokkos::View<VectorScalar**, Kokkos::LayoutLeft, Kokkos::Cuda>,
  Kokkos::View<VectorScalar**, Kokkos::LayoutLeft, Kokkos::Cuda> >
{
public:

  typedef Kokkos::Cuda execution_space;
  typedef execution_space::size_type size_type;

  typedef CrsProductTensor< TensorScalar, execution_space > tensor_type;
  typedef BlockCrsMatrix< tensor_type, MatrixScalar, execution_space >
    matrix_type;
  typedef Kokkos::View< VectorScalar**, Kokkos::LayoutLeft, Kokkos::Cuda >
    vector_type;
#define USE_LDG 0

#if USE_LDG == 0

  // Multiply kernel: stages blocks of both x and A in shared memory.
  class MultiplyKernel {
  public:

    const matrix_type m_A;
    const vector_type m_x;
    const vector_type m_y;
    const size_type BlockSize;

    MultiplyKernel( const matrix_type & A,
                    const vector_type & x,
                    const vector_type & y,
                    const size_type block_size )
      : m_A( A ), m_x( x ), m_y( y ), BlockSize(block_size) {}

    __device__
    void operator()(void) const
    {
      // Number of stochastic basis functions (rows of the product tensor)
      const size_type dim = m_A.block.dimension();

      // Shared memory layout: BlockSize*dim values of x, then BlockSize*dim
      // values of A, then dim partial sums of y (plus reduction scratch when
      // warp shuffles are unavailable).
      volatile VectorScalar * const sh_x =
        kokkos_impl_cuda_shared_memory<VectorScalar>();
      volatile MatrixScalar * const sh_A = sh_x + BlockSize*dim;
      volatile VectorScalar * const sh_y = sh_A + BlockSize*dim;
#if !HAVE_CUDA_SHUFFLE
      volatile VectorScalar * const sh_t = sh_y + dim;
#endif

      const size_type nid = blockDim.x * blockDim.y;
      const size_type tid = threadIdx.x + blockDim.x * threadIdx.y;

      // Active-lane mask for the sub-warp owned by this row of threads
      const int mask = blockDim.x == 32 ? 0xffffffff :
        ((1<<blockDim.x)-1)<<(threadIdx.y%(32/blockDim.x))*blockDim.x;
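      // Illustrative example (not from the original source): with
      // blockDim.x == 16 there are 32/16 == 2 thread rows per hardware warp,
      // so (1<<16)-1 == 0xffff is shifted by either 0 or 16 bits and the two
      // half-warps get masks 0x0000ffff and 0xffff0000 respectively.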
      // Zero this block's portion of y in shared memory
      for ( size_type i = tid; i < dim; i += nid ) {
        sh_y[i] = 0;
      }
      // blockIdx.x == row of the deterministic (finite element) system.
      // Loop over the block entries of that row, BlockSize columns at a time.
      const size_type iBlockEntryBeg = m_A.graph.row_map[ blockIdx.x ];
      const size_type iBlockEntryEnd = m_A.graph.row_map[ blockIdx.x + 1 ];
      for (size_type iBlockEntry=iBlockEntryBeg; iBlockEntry<iBlockEntryEnd;
           iBlockEntry += BlockSize) {
        const size_type block_size =
          (iBlockEntryEnd-iBlockEntry < BlockSize) ?
          iBlockEntryEnd-iBlockEntry : BlockSize;

        // Wait until x and A from the previous pass are no longer needed
        // before overwriting shared memory.
        __syncthreads();

        // Coalesced read of the current blocks of x and A into shared memory
        for ( size_type col = 0; col < block_size; ++col ) {

          const size_type iBlockColumn = m_A.graph.entries( iBlockEntry + col );
          const VectorScalar * const x = & m_x( 0, iBlockColumn );
          const MatrixScalar * const A = & m_A.values( 0, iBlockEntry + col );

          for ( size_type i = tid; i < dim; i += nid ) {
            sh_x[col + i * BlockSize] = x[i]; // m_x( i, iBlockColumn )
            sh_A[col + i * BlockSize] = A[i]; // m_A.values( i, iBlockEntry + col )
          }
        }

        __syncthreads(); // wait for x and A to be fully loaded before use
        // Each row of threads (threadIdx.y) accumulates stochastic rows of y
        for ( size_type i = threadIdx.y; i < dim; i += blockDim.y ) {

          VectorScalar y = 0;

          // Product-tensor entries for stochastic row i
          const size_type lBeg = m_A.block.entry_begin( i );
          const size_type lEnd = m_A.block.entry_end( i );

          // Loop over tensor entries with coalesced reads across threadIdx.x
          for ( size_type l = lBeg+threadIdx.x; l < lEnd; l += blockDim.x ) {

            // Tensor coordinates (j,k) are packed into the 16-bit halves of
            // kj; both are pre-scaled by BlockSize to index the interleaved
            // shared-memory layout.
            const size_type kj = m_A.block.coord( l );
            const TensorScalar v = m_A.block.value( l );
            const size_type j = ( kj & 0x0ffff ) * BlockSize ;
            const size_type k = ( kj >> 16 ) * BlockSize ;

            // Symmetric contraction: y_i += c_ijk * ( A_j x_k + A_k x_j )
            for ( size_type col = 0; col < block_size; ++col ) {
              y += v * ( sh_A[col+j] * sh_x[col+k] +
                         sh_A[col+k] * sh_x[col+j] );
            }
          }
          // Reduce the partial sums held by the blockDim.x threads of this row
#if HAVE_CUDA_SHUFFLE
          if (blockDim.x >= 2)  y += shfl_down(y,  1, blockDim.x, mask);
          if (blockDim.x >= 4)  y += shfl_down(y,  2, blockDim.x, mask);
          if (blockDim.x >= 8)  y += shfl_down(y,  4, blockDim.x, mask);
          if (blockDim.x >= 16) y += shfl_down(y,  8, blockDim.x, mask);
          if (blockDim.x >= 32) y += shfl_down(y, 16, blockDim.x, mask);
          if ( threadIdx.x == 0 ) sh_y[i] += y;
#else
          // Shared-memory tree reduction when warp shuffles are unavailable
          sh_t[ tid ] = y;
          if (threadIdx.x+16 < blockDim.x) sh_t[tid] += sh_t[tid+16];
          sync_warp(mask);
          if (threadIdx.x+ 8 < blockDim.x) sh_t[tid] += sh_t[tid+ 8];
          sync_warp(mask);
          if (threadIdx.x+ 4 < blockDim.x) sh_t[tid] += sh_t[tid+ 4];
          sync_warp(mask);
          if (threadIdx.x+ 2 < blockDim.x) sh_t[tid] += sh_t[tid+ 2];
          sync_warp(mask);
          if (threadIdx.x+ 1 < blockDim.x) sh_t[tid] += sh_t[tid+ 1];
          sync_warp(mask);
          if (threadIdx.x == 0) sh_y[i] += sh_t[tid];
#endif

        }

      }
      // Wait for all contributions to y to be complete
      __syncthreads();

      // Store this block row's result to global memory
      for ( size_type i = tid; i < dim; i += nid ) {
        m_y( i, blockIdx.x ) = sh_y[ i ];
      }
    }
  };
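  // Shared-memory budget implied by the layout above (a sketch, not a claim
  // from the original source): roughly
  //   ( (sizeof(VectorScalar)+sizeof(MatrixScalar))*BlockSize
  //     + sizeof(VectorScalar) ) * dim
  // bytes per thread block, plus per-warp reduction scratch when shuffles are
  // unavailable.  The apply() routine below sizes its launch from the same
  // expression.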
#elif USE_LDG == 1

  // Multiply kernel variant: stages only x in shared memory and reads A
  // through the read-only data cache (__ldg) where available.
  class MultiplyKernel {
  public:

    const matrix_type m_A;
    const vector_type m_x;
    const vector_type m_y;
    const size_type BlockSize;

    MultiplyKernel( const matrix_type & A,
                    const vector_type & x,
                    const vector_type & y,
                    const size_type block_size )
      : m_A( A ), m_x( x ), m_y( y ), BlockSize(block_size) {}

    __device__
    void operator()(void) const
    {
      const size_type dim = m_A.block.dimension();

      // Shared memory: BlockSize*dim values of x, then dim partial sums of y
      // (plus reduction scratch when warp shuffles are unavailable).
      volatile VectorScalar * const sh_x =
        kokkos_impl_cuda_shared_memory<VectorScalar>();
      volatile VectorScalar * const sh_y = sh_x + BlockSize*dim;
#if !HAVE_CUDA_SHUFFLE
      volatile VectorScalar * const sh_t = sh_y + dim;
#endif

      const size_type nid = blockDim.x * blockDim.y;
      const size_type tid = threadIdx.x + blockDim.x * threadIdx.y;

      // Active-lane mask for the sub-warp owned by this row of threads
      const int mask = blockDim.x == 32 ? 0xffffffff :
        ((1<<blockDim.x)-1)<<(threadIdx.y%(32/blockDim.x))*blockDim.x;
      // Zero this block's portion of y in shared memory
      for ( size_type i = tid; i < dim; i += nid ) {
        sh_y[i] = 0;
      }
      // blockIdx.x == row of the deterministic (finite element) system
      const size_type iBlockEntryBeg = m_A.graph.row_map[ blockIdx.x ];
      const size_type iBlockEntryEnd = m_A.graph.row_map[ blockIdx.x + 1 ];
      for (size_type iBlockEntry=iBlockEntryBeg; iBlockEntry<iBlockEntryEnd;
           iBlockEntry += BlockSize) {
        const size_type block_size =
          (iBlockEntryEnd-iBlockEntry < BlockSize) ?
          iBlockEntryEnd-iBlockEntry : BlockSize;

        // Wait until x from the previous pass is no longer needed
        __syncthreads();

        // Coalesced read of the current block of x into shared memory
        for ( size_type col = 0; col < block_size; ++col ) {

          const size_type iBlockColumn = m_A.graph.entries( iBlockEntry + col );
          const VectorScalar * const x = & m_x( 0, iBlockColumn );

          for ( size_type i = tid; i < dim; i += nid ) {
            sh_x[col + i * BlockSize] = x[i]; // m_x( i, iBlockColumn )
          }
        }

        __syncthreads(); // wait for x to be fully loaded before use
        // Each row of threads (threadIdx.y) accumulates stochastic rows of y
        for ( size_type i = threadIdx.y; i < dim; i += blockDim.y ) {

          VectorScalar y = 0;

          // Product-tensor entries for stochastic row i
          const size_type lBeg = m_A.block.entry_begin( i );
          const size_type lEnd = m_A.block.entry_end( i );

          // Loop over tensor entries with coalesced reads across threadIdx.x
          for ( size_type l = lBeg+threadIdx.x; l < lEnd; l += blockDim.x ) {

            // Tensor coordinates (j,k) packed into the 16-bit halves of kj
            const size_type kj = m_A.block.coord( l );
            const TensorScalar v = m_A.block.value( l );
            const size_type j = ( kj & 0x0ffff ) ;
            const size_type k = ( kj >> 16 ) ;

            for ( size_type col = 0; col < block_size; ++col ) {
              const size_type bCol = iBlockEntry + col;
#if (__CUDA_ARCH__ >= 350)
              y += v * ( __ldg(&m_A.values(j,bCol)) * sh_x[col+k*BlockSize] +
                         __ldg(&m_A.values(k,bCol)) * sh_x[col+j*BlockSize] );
#else
              y += v * ( m_A.values(j,bCol) * sh_x[col+k*BlockSize] +
                         m_A.values(k,bCol) * sh_x[col+j*BlockSize] );
#endif
            }
          }
          // Reduce the partial sums held by the blockDim.x threads of this row
#if HAVE_CUDA_SHUFFLE
          if (blockDim.x >= 2)  y += shfl_down(y,  1, blockDim.x, mask);
          if (blockDim.x >= 4)  y += shfl_down(y,  2, blockDim.x, mask);
          if (blockDim.x >= 8)  y += shfl_down(y,  4, blockDim.x, mask);
          if (blockDim.x >= 16) y += shfl_down(y,  8, blockDim.x, mask);
          if (blockDim.x >= 32) y += shfl_down(y, 16, blockDim.x, mask);
          if ( threadIdx.x == 0 ) sh_y[i] += y;
#else
          // Shared-memory tree reduction when warp shuffles are unavailable
          sh_t[ tid ] = y;
          if (threadIdx.x+16 < blockDim.x) sh_t[tid] += sh_t[tid+16];
          sync_warp(mask);
          if (threadIdx.x+ 8 < blockDim.x) sh_t[tid] += sh_t[tid+ 8];
          sync_warp(mask);
          if (threadIdx.x+ 4 < blockDim.x) sh_t[tid] += sh_t[tid+ 4];
          sync_warp(mask);
          if (threadIdx.x+ 2 < blockDim.x) sh_t[tid] += sh_t[tid+ 2];
          sync_warp(mask);
          if (threadIdx.x+ 1 < blockDim.x) sh_t[tid] += sh_t[tid+ 1];
          sync_warp(mask);
          if (threadIdx.x == 0) sh_y[i] += sh_t[tid];
#endif

        }

      }
      // Wait for all contributions to y to be complete
      __syncthreads();

      // Store this block row's result to global memory
      for ( size_type i = tid; i < dim; i += nid ) {
        m_y( i, blockIdx.x ) = sh_y[ i ];
      }
    }
  };

#endif // USE_LDG
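  // Note on the two kernel variants (observation added here, not a claim from
  // the original source): the USE_LDG variant stages only x in shared memory,
  // so its per-block footprint is roughly
  //   ( sizeof(VectorScalar)*BlockSize + sizeof(VectorScalar) ) * dim
  // bytes; A is instead streamed through the read-only data cache via __ldg
  // on sm_35 and newer.  The shared-memory formulas in apply() below switch
  // on the same USE_LDG setting.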
  //--------------------------------------------------------------------------

  // Candidate kernel configuration considered by apply()
  struct TensorReadEntry {
    size_type block_size, shmem, num_blocks, num_warp;
    double reads;
  };
  static void apply( const matrix_type & A,
                     const vector_type & x,
                     const vector_type & y )
  {
    const size_type row_count = A.graph.row_map.extent(0) - 1;
    const size_type tensor_dimension = A.block.dimension();
    const size_type tensor_align = tensor_dimension;
    const size_type avg_tensor_entries_per_row = A.block.avg_entries_per_row();

    // Should be computed from the FEM graph; fixed here to a typical
    // 3-D 27-point stencil value.
    const size_type fem_nnz_per_row = 27;

    // Properties of the currently selected CUDA device
    DeviceProp device_prop;
    const size_type shcap = device_prop.shared_memory_capacity;
    const size_type sh_granularity = device_prop.shared_memory_granularity;
    const size_type max_shmem_per_block = device_prop.max_shmem_per_block;
    const size_type max_blocks_per_sm = device_prop.max_blocks_per_sm;
    const size_type warp_size = device_prop.warp_size;
    const size_type warp_granularity = device_prop.warp_granularity;
    const size_type max_warps_per_block =
      std::min(device_prop.max_threads_per_block / warp_size,
               device_prop.max_warps_per_sm);
    const size_type min_warps_per_block = 1;
    const size_type max_regs_per_sm = device_prop.max_regs_per_sm;
    const size_type max_regs_per_block = device_prop.max_regs_per_block;
    const size_type reg_bank_size = device_prop.reg_bank_size;

    // Number of warps that fit per SM and per block given the kernel's
    // register usage (queried via CUDA introspection).
    const size_type regs_per_thread =
      device_prop.get_kernel_registers(
        Kokkos::Impl::cuda_parallel_launch_local_memory<MultiplyKernel>);
    const size_type regs_per_warp =
      (warp_size*regs_per_thread + reg_bank_size-1) & ~(reg_bank_size-1);
    const size_type warps_per_sm =
      (max_regs_per_sm/regs_per_warp) & ~(warp_granularity-1);
    const size_type warps_per_block =
      (max_regs_per_block/regs_per_warp) & ~(warp_granularity-1);
    // Choose threads per stochastic row from the tensor's average row
    // density: dense tensors get a full warp per row, otherwise a half warp
    // (for double, 16 threads/row still yields coalesced reads).
    const size_type threads_per_row =
      avg_tensor_entries_per_row >= 88 ? 32 : 16;
    const size_type rows_per_warp = warp_size / threads_per_row;

    const size_type vec_scalar_size = sizeof(VectorScalar);
#if USE_LDG == 0
    const size_type mat_scalar_size = sizeof(MatrixScalar);
#endif
#define USE_FIXED_BLOCKSIZE 0

#if USE_FIXED_BLOCKSIZE

    // Hard-wired configuration (mainly for experimentation)
    const size_type num_blocks = 3;
    size_type nw = warps_per_sm / num_blocks;
    while (nw > 1 && num_blocks*nw % warp_granularity) --nw;
    const size_type num_warp = nw;
    const size_type sh_per_block = shcap / num_blocks;
    const size_type sr =
      device_prop.has_shuffle ? 0 : vec_scalar_size*warp_size*num_warp;
#if USE_LDG == 1
    size_type bs = ((sh_per_block - sr) / tensor_align - vec_scalar_size) /
      vec_scalar_size;
#else
    size_type bs = ((sh_per_block - sr) / tensor_align - vec_scalar_size) /
      (vec_scalar_size+mat_scalar_size);
#endif
    if (bs % 2 == 0) --bs;
    const size_type block_size = bs;
#if USE_LDG == 1
    const size_type shmem =
      ( (vec_scalar_size*block_size+vec_scalar_size)*tensor_align + sr +
        sh_granularity-1 ) & ~(sh_granularity-1);
#else
    const size_type shmem =
      ( ((vec_scalar_size+mat_scalar_size)*block_size+vec_scalar_size)*tensor_align
        + sr + sh_granularity-1 ) & ~(sh_granularity-1);
#endif
#else

    // Choose block_size to balance two competing goals: more blocks per SM
    // (higher occupancy) versus a larger block_size (fewer passes over the
    // tensor).  For each candidate block_size we estimate the number of
    // tensor reads per thread and keep the configuration with the smallest
    // estimate.
    const size_type half_nnz_per_row = fem_nnz_per_row / 2 + 1;
    const size_type block_size_max =
      half_nnz_per_row % 2 ? half_nnz_per_row + 1 : half_nnz_per_row;
    const size_type block_size_min = 3;
    Teuchos::Array<TensorReadEntry> reads_per_thread;
    for (size_type bs = block_size_min; bs <= block_size_max; bs += 2) {
      // The number of warps is not known yet, so bound the reduction scratch
      // by the largest possible number of warps in one block.
      const size_type sr =
        device_prop.has_shuffle ? 0 : vec_scalar_size*warp_size*warps_per_block;
#if USE_LDG == 1
      size_type shmem =
        (vec_scalar_size*bs+vec_scalar_size)*tensor_align+sr;
#else
      size_type shmem =
        ((vec_scalar_size+mat_scalar_size)*bs+vec_scalar_size)*tensor_align+sr;
#endif
      shmem = (shmem + sh_granularity-1) & ~(sh_granularity-1);
      if (shmem <= max_shmem_per_block) {
        const size_type num_blocks = std::min(shcap / shmem, max_blocks_per_sm);
        const size_type tensor_reads = (fem_nnz_per_row+bs-1) / bs;
        size_type num_warp =
          std::min(std::max(std::min(warps_per_sm/num_blocks, warps_per_block),
                            min_warps_per_block),
                   max_warps_per_block);
        while (num_warp > 1 && num_blocks*num_warp % warp_granularity)
          --num_warp;
        TensorReadEntry entry;
        entry.block_size = bs;
        entry.shmem = shmem;
        entry.num_blocks = num_blocks;
        entry.num_warp = num_warp;

        // Prefer configurations with at least a few blocks per SM
        const size_type factor = std::min(num_blocks, size_type(3));
        entry.reads = (static_cast<double>(tensor_reads) /
                       static_cast<double>(factor*num_blocks*num_warp));
        reads_per_thread.push_back(entry);
      }
    }
    TEUCHOS_TEST_FOR_EXCEPTION(
      reads_per_thread.size() == 0, std::logic_error,
      "Stochastic problem dimension is too large to fit in shared memory");
    // Pick the candidate with the fewest estimated tensor reads per thread
    size_type idx = 0;
    double min_reads = reads_per_thread[0].reads;
    for (int i=1; i<reads_per_thread.size(); ++i) {
      if (reads_per_thread[i].reads < min_reads) {
        idx = i;
        min_reads = reads_per_thread[i].reads;
      }
    }
    const size_type block_size = reads_per_thread[idx].block_size;
    const size_type shmem = reads_per_thread[idx].shmem;
    const size_type num_blocks = reads_per_thread[idx].num_blocks;
    const size_type num_warp = reads_per_thread[idx].num_warp;

#endif // USE_FIXED_BLOCKSIZE
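    // Worked example of the heuristic above (illustrative only, assuming the
    // defaults fem_nnz_per_row == 27 and block_size_min == 3): candidate
    // block sizes are 3, 5, 7, ... and tensor_reads == ceil(27/bs) is
    // 9, 6, 4, ...  A larger bs means fewer passes over the tensor but more
    // shared memory per block, hence fewer resident blocks per SM; the
    // entry.reads metric trades these off and the smallest value wins.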
    // Thread block and grid dimensions
    const dim3 dBlock( threads_per_row , rows_per_warp*num_warp , 1 );
    const dim3 dGrid( row_count, 1, 1 );
    // Diagnostic output (disabled)
#if 0
    std::cout << "block_size = " << block_size
              << " tensor reads = " << (fem_nnz_per_row+block_size-1)/block_size
              << " regs_per_thread = " << regs_per_thread
              << " num blocks = " << num_blocks
              << " num warps = " << num_warp
              << " num rows = " << tensor_dimension
              << " rows/warp = " << tensor_dimension / (num_warp*rows_per_warp)
              << " avg entries/row = " << avg_tensor_entries_per_row
              << std::endl;
#endif
    // Launch the kernel
    //cudaProfilerStart();
    Kokkos::Impl::cuda_parallel_launch_local_memory<<< dGrid, dBlock, shmem >>>
      ( MultiplyKernel( A, x, y, block_size ) );
    //cudaProfilerStop();
  }
};

//----------------------------------------------------------------------------
//----------------------------------------------------------------------------

} // namespace Stokhos

#endif /* #ifndef STOKHOS_CUDA_CRS_PRODUCT_TENSOR_HPP */
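//----------------------------------------------------------------------------
// Usage sketch (illustrative comment only; the block-CRS matrix and product
// tensor are assumed to have been assembled elsewhere, and num_fem_rows is a
// hypothetical name for the number of deterministic rows):
//
//   typedef Stokhos::CrsProductTensor<double, Kokkos::Cuda> tensor_type;
//   typedef Stokhos::BlockCrsMatrix<tensor_type, double, Kokkos::Cuda>
//     matrix_type;
//   typedef Kokkos::View<double**, Kokkos::LayoutLeft, Kokkos::Cuda>
//     vector_type;
//
//   matrix_type A = /* ... assembled block-CRS system ... */;
//   vector_type x( "x", A.block.dimension(), num_fem_rows );
//   vector_type y( "y", A.block.dimension(), num_fem_rows );
//
//   Stokhos::Multiply<matrix_type, vector_type, vector_type>::apply( A, x, y );
//----------------------------------------------------------------------------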